Commit 6f510923 authored by Linus Torvalds

Merge tag 'char-misc-5.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver fixes from Greg KH:
 "Here are some binder, habanalabs, and vboxguest driver fixes for
  5.1-rc3.

  The Binder fixes resolve some reported issues found by testing, first
  by the selinux developers, and then earlier today by syzbot.

  The habanalabs fixes are all minor, resolving a number of tiny things.

  The vboxguest patches are a bit larger. They resolve the fact that
  VirtualBox decided to change their API in their latest release in a
  way that broke the existing kernel code, despite saying that they were
  never going to do that. So this is a bit of a "new feature", but it is
  good to get merged so that 5.1 will work with the latest release. The
  changes are not large, and of course VirtualBox "swears" they will not
  break this again, but no one is holding their breath here.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'char-misc-5.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
  virt: vbox: Implement passing requestor info to the host for VirtualBox 6.0.x
  binder: fix race between munmap() and direct reclaim
  binder: fix BUG_ON found by selinux-testsuite
  habanalabs: cast to expected type
  habanalabs: prevent host crash during suspend/resume
  habanalabs: perform accounting for active CS
  habanalabs: fix mapping with page size bigger than 4KB
  habanalabs: complete user context cleanup before hard reset
  habanalabs: fix bug when mapping very large memory area
  habanalabs: fix MMU number of pages calculation
parents 3467b907 0532a1b0
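
For context on the VirtualBox requestor change described in the pull message above, here is a minimal, standalone sketch of how the new requestor word is put together. It is not part of the commit: the VMMDEV_REQUESTOR_* values are copied from the vbox_vmmdev_types.h hunk further down, the logic mirrors the vbg_misc_device_requestor() helper added in vboxguest_linux.c, and compose_requestor()/main() are illustrative names only.

/*
 * Standalone illustration (userspace C, not driver code).  Flag values are
 * taken from the vbox_vmmdev_types.h hunk in this merge; the helpers are
 * hypothetical and exist only to show how the bits combine.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VMMDEV_REQUESTOR_USR_ROOT        0x00000003u
#define VMMDEV_REQUESTOR_USR_USER        0x00000006u
#define VMMDEV_REQUESTOR_USR_MASK        0x00000007u
#define VMMDEV_REQUESTOR_USERMODE        0x00000008u
#define VMMDEV_REQUESTOR_CON_DONT_KNOW   0x00000000u
#define VMMDEV_REQUESTOR_GRP_VBOX        0x00000080u
#define VMMDEV_REQUESTOR_TRUST_NOT_GIVEN 0x00000000u
#define VMMDEV_REQUESTOR_USER_DEVICE     0x00008000u

/* Mirrors the decisions made by vbg_misc_device_requestor() in the diff. */
static uint32_t compose_requestor(bool is_root, bool in_vbox_group,
				  bool via_vboxuser_node)
{
	uint32_t requestor = VMMDEV_REQUESTOR_USERMODE |
			     VMMDEV_REQUESTOR_CON_DONT_KNOW |
			     VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;

	/* User classification: root/admin vs. regular user. */
	requestor |= is_root ? VMMDEV_REQUESTOR_USR_ROOT
			     : VMMDEV_REQUESTOR_USR_USER;
	/* Member of the special VirtualBox user group gets an extra bit. */
	if (in_vbox_group)
		requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
	/* Sessions opened via /dev/vboxuser are marked as less trusted. */
	if (via_vboxuser_node)
		requestor |= VMMDEV_REQUESTOR_USER_DEVICE;
	return requestor;
}

int main(void)
{
	uint32_t req = compose_requestor(false, true, true);

	printf("requestor=0x%08x user-class=%u\n",
	       (unsigned int)req,
	       (unsigned int)(req & VMMDEV_REQUESTOR_USR_MASK));
	return 0;
}

The driver stores this value in the session at open time and then copies it into the (previously reserved) requestor field of every vmmdev_request_header it sends, which is what VirtualBox 6.0.x now expects.
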
@@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32)))
if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
!IS_ALIGNED(offset, sizeof(u32)))
return 0;
binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
offset, read_size);
@@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
mm = alloc->vma_vm_mm;
if (!mmget_not_zero(mm))
goto err_mmget;
if (!down_write_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
vma = binder_alloc_get_vma(alloc);
if (vma) {
if (!mmget_not_zero(alloc->vma_vm_mm))
goto err_mmget;
mm = alloc->vma_vm_mm;
if (!down_read_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
}
list_lru_isolate(lru, item);
spin_unlock(lock);
@@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
zap_page_range(vma, page_addr, PAGE_SIZE);
trace_binder_unmap_user_end(alloc, index);
up_read(&mm->mmap_sem);
mmput(mm);
}
up_write(&mm->mmap_sem);
mmput(mm);
trace_binder_unmap_kernel_start(alloc, index);
@@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref)
/* We also need to update CI for internal queues */
if (cs->submitted) {
int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
WARN_ONCE((cs_cnt < 0),
"hl%d: error in CS active cnt %d\n",
hdev->id, cs_cnt);
hl_int_hw_queue_update_ci(cs);
spin_lock(&hdev->hw_queues_mirror_lock);
@@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data)
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
enum vm_type_t *vm_type;
bool once = true;
u64 j;
int i;
if (!dev_entry->hdev->mmu_enable)
@@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
} else {
phys_pg_pack = hnode->ptr;
seq_printf(s,
" 0x%-14llx %-10u %-4u\n",
" 0x%-14llx %-10llu %-4u\n",
hnode->vaddr, phys_pg_pack->total_size,
phys_pg_pack->handle);
}
@@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data)
phys_pg_pack->page_size);
seq_puts(s, " physical address\n");
seq_puts(s, "---------------------\n");
for (i = 0 ; i < phys_pg_pack->npages ; i++) {
for (j = 0 ; j < phys_pg_pack->npages ; j++) {
seq_printf(s, " 0x%-14llx\n",
phys_pg_pack->pages[i]);
phys_pg_pack->pages[j]);
}
}
spin_unlock(&vm->idr_lock);
@@ -11,6 +11,8 @@
#include <linux/sched/signal.h>
#include <linux/hwmon.h>
#define HL_PLDM_PENDING_RESET_PER_SEC (HL_PENDING_RESET_PER_SEC * 10)
bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
{
if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
@@ -216,6 +218,7 @@ static int device_early_init(struct hl_device *hdev)
spin_lock_init(&hdev->hw_queues_mirror_lock);
atomic_set(&hdev->in_reset, 0);
atomic_set(&hdev->fd_open_cnt, 0);
atomic_set(&hdev->cs_active_cnt, 0);
return 0;
@@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev)
pci_save_state(hdev->pdev);
/* Block future CS/VM/JOB completion operations */
rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
if (rc) {
dev_err(hdev->dev, "Can't suspend while in reset\n");
return -EIO;
}
/* This blocks all other stuff that is not blocked by in_reset */
hdev->disabled = true;
/*
* Flush anyone that is inside the critical section of enqueue
* jobs to the H/W
*/
hdev->asic_funcs->hw_queues_lock(hdev);
hdev->asic_funcs->hw_queues_unlock(hdev);
/* Flush processes that are sending message to CPU */
mutex_lock(&hdev->send_cpu_message_lock);
mutex_unlock(&hdev->send_cpu_message_lock);
rc = hdev->asic_funcs->suspend(hdev);
if (rc)
dev_err(hdev->dev,
@@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev)
pci_set_power_state(hdev->pdev, PCI_D0);
pci_restore_state(hdev->pdev);
rc = pci_enable_device(hdev->pdev);
rc = pci_enable_device_mem(hdev->pdev);
if (rc) {
dev_err(hdev->dev,
"Failed to enable PCI device in resume\n");
return rc;
}
pci_set_master(hdev->pdev);
rc = hdev->asic_funcs->resume(hdev);
if (rc) {
dev_err(hdev->dev,
"Failed to enable PCI access from device CPU\n");
return rc;
dev_err(hdev->dev, "Failed to resume device after suspend\n");
goto disable_device;
}
hdev->disabled = false;
atomic_set(&hdev->in_reset, 0);
rc = hl_device_reset(hdev, true, false);
if (rc) {
dev_err(hdev->dev, "Failed to reset device during resume\n");
goto disable_device;
}
return 0;
disable_device:
pci_clear_master(hdev->pdev);
pci_disable_device(hdev->pdev);
return rc;
}
static void hl_device_hard_reset_pending(struct work_struct *work)
@@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
struct hl_device_reset_work *device_reset_work =
container_of(work, struct hl_device_reset_work, reset_work);
struct hl_device *hdev = device_reset_work->hdev;
u16 pending_cnt = HL_PENDING_RESET_PER_SEC;
u16 pending_total, pending_cnt;
struct task_struct *task = NULL;
if (hdev->pldm)
pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
else
pending_total = HL_PENDING_RESET_PER_SEC;
pending_cnt = pending_total;
/* Flush all processes that are inside hl_open */
mutex_lock(&hdev->fd_open_cnt_lock);
@@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
}
}
pending_cnt = pending_total;
while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
pending_cnt--;
ssleep(1);
}
if (atomic_read(&hdev->fd_open_cnt))
dev_crit(hdev->dev,
"Going to hard reset with open user contexts\n");
mutex_unlock(&hdev->fd_open_cnt_lock);
hl_device_reset(hdev, true, true);
@@ -1201,15 +1201,6 @@ static int goya_stop_external_queues(struct hl_device *hdev)
return retval;
}
static void goya_resume_external_queues(struct hl_device *hdev)
{
WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
}
/*
* goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
*
@@ -2178,36 +2169,6 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
return retval;
}
static void goya_resume_internal_queues(struct hl_device *hdev)
{
WREG32(mmMME_QM_GLBL_CFG1, 0);
WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
WREG32(mmTPC0_QM_GLBL_CFG1, 0);
WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
WREG32(mmTPC1_QM_GLBL_CFG1, 0);
WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
WREG32(mmTPC2_QM_GLBL_CFG1, 0);
WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
WREG32(mmTPC3_QM_GLBL_CFG1, 0);
WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
WREG32(mmTPC4_QM_GLBL_CFG1, 0);
WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
WREG32(mmTPC5_QM_GLBL_CFG1, 0);
WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
WREG32(mmTPC6_QM_GLBL_CFG1, 0);
WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
WREG32(mmTPC7_QM_GLBL_CFG1, 0);
WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
}
static void goya_dma_stall(struct hl_device *hdev)
{
WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
@@ -2905,20 +2866,6 @@ int goya_suspend(struct hl_device *hdev)
{
int rc;
rc = goya_stop_internal_queues(hdev);
if (rc) {
dev_err(hdev->dev, "failed to stop internal queues\n");
return rc;
}
rc = goya_stop_external_queues(hdev);
if (rc) {
dev_err(hdev->dev, "failed to stop external queues\n");
return rc;
}
rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -2928,15 +2875,7 @@ int goya_suspend(struct hl_device *hdev)
int goya_resume(struct hl_device *hdev)
{
int rc;
goya_resume_external_queues(hdev);
goya_resume_internal_queues(hdev);
rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
if (rc)
dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
return rc;
return goya_init_iatu(hdev);
}
static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
@@ -3070,7 +3009,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
*dma_handle = hdev->asic_prop.sram_base_address;
base = hdev->pcie_bar[SRAM_CFG_BAR_ID];
base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
switch (queue_id) {
case GOYA_QUEUE_ID_MME:
@@ -793,11 +793,11 @@ struct hl_vm_hash_node {
* struct hl_vm_phys_pg_pack - physical page pack.
* @vm_type: describes the type of the virtual area descriptor.
* @pages: the physical page array.
* @npages: num physical pages in the pack.
* @total_size: total size of all the pages in this list.
* @mapping_cnt: number of shared mappings.
* @asid: the context related to this list.
* @npages: num physical pages in the pack.
* @page_size: size of each page in the pack.
* @total_size: total size of all the pages in this list.
* @flags: HL_MEM_* flags related to this list.
* @handle: the provided handle related to this list.
* @offset: offset from the first page.
@@ -807,11 +807,11 @@ struct hl_vm_hash_node {
struct hl_vm_phys_pg_pack {
enum vm_type_t vm_type; /* must be first */
u64 *pages;
u64 npages;
u64 total_size;
atomic_t mapping_cnt;
u32 asid;
u32 npages;
u32 page_size;
u32 total_size;
u32 flags;
u32 handle;
u32 offset;
@@ -1056,13 +1056,15 @@ struct hl_device_reset_work {
* @cb_pool_lock: protects the CB pool.
* @user_ctx: current user context executing.
* @dram_used_mem: current DRAM memory consumption.
* @in_reset: is device in reset flow.
* @curr_pll_profile: current PLL profile.
* @fd_open_cnt: number of open user processes.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
* value is saved so in case of hard-reset, KMD will restore this
* value and update the F/W after the re-initialization
* @in_reset: is device in reset flow.
* @curr_pll_profile: current PLL profile.
* @fd_open_cnt: number of open user processes.
* @cs_active_cnt: number of active command submissions on this device (active
* means already in H/W queues)
* @major: habanalabs KMD major.
* @high_pll: high PLL profile frequency.
* @soft_reset_cnt: number of soft reset since KMD loading.
@@ -1128,11 +1130,12 @@ struct hl_device {
struct hl_ctx *user_ctx;
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
atomic_t in_reset;
atomic_t curr_pll_profile;
atomic_t fd_open_cnt;
u64 timeout_jiffies;
u64 max_power;
atomic_t cs_active_cnt;
u32 major;
u32 high_pll;
u32 soft_reset_cnt;
@@ -370,12 +370,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
spin_unlock(&hdev->hw_queues_mirror_lock);
}
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) {
atomic_inc(&hdev->cs_active_cnt);
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
if (job->ext_queue)
ext_hw_queue_schedule_job(job);
else
int_hw_queue_schedule_job(job);
}
cs->submitted = true;
@@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
struct hl_device *hdev = ctx->hdev;
struct hl_vm *vm = &hdev->vm;
struct hl_vm_phys_pg_pack *phys_pg_pack;
u64 paddr = 0;
u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift;
int handle, rc, i;
u64 paddr = 0, total_size, num_pgs, i;
u32 num_curr_pgs, page_size, page_shift;
int handle, rc;
bool contiguous;
num_curr_pgs = 0;
@@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
if (!paddr) {
dev_err(hdev->dev,
"failed to allocate %u huge contiguous pages\n",
"failed to allocate %llu huge contiguous pages\n",
num_pgs);
return -ENOMEM;
}
@@ -93,7 +93,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
phys_pg_pack->flags = args->flags;
phys_pg_pack->contiguous = contiguous;
phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL);
phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
if (!phys_pg_pack->pages) {
rc = -ENOMEM;
goto pages_arr_err;
@@ -148,7 +148,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
page_size);
kfree(phys_pg_pack->pages);
kvfree(phys_pg_pack->pages);
pages_arr_err:
kfree(phys_pg_pack);
pages_pack_err:
@@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_vm *vm = &hdev->vm;
int i;
u64 i;
if (!phys_pg_pack->created_from_userptr) {
if (phys_pg_pack->contiguous) {
@@ -288,7 +288,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
}
}
kfree(phys_pg_pack->pages);
kvfree(phys_pg_pack->pages);
kfree(phys_pg_pack);
}
@@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev,
* - Return the start address of the virtual block
*/
static u64 get_va_block(struct hl_device *hdev,
struct hl_va_range *va_range, u32 size, u64 hint_addr,
struct hl_va_range *va_range, u64 size, u64 hint_addr,
bool is_userptr)
{
struct hl_vm_va_block *va_block, *new_va_block = NULL;
@@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev,
}
if (!new_va_block) {
dev_err(hdev->dev, "no available va block for size %u\n", size);
dev_err(hdev->dev, "no available va block for size %llu\n",
size);
goto out;
}
@@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct scatterlist *sg;
dma_addr_t dma_addr;
u64 page_mask;
u32 npages, total_npages, page_size = PAGE_SIZE;
u64 page_mask, total_npages;
u32 npages, page_size = PAGE_SIZE;
bool first = true, is_huge_page_opt = true;
int rc, i, j;
@@ -691,7 +692,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
page_mask = ~(((u64) page_size) - 1);
phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL);
phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
GFP_KERNEL);
if (!phys_pg_pack->pages) {
rc = -ENOMEM;
goto page_pack_arr_mem_err;
@@ -750,9 +752,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_device *hdev = ctx->hdev;
u64 next_vaddr = vaddr, paddr;
u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
u32 page_size = phys_pg_pack->page_size;
int i, rc = 0, mapped_pg_cnt = 0;
int rc = 0;
for (i = 0 ; i < phys_pg_pack->npages ; i++) {
paddr = phys_pg_pack->pages[i];
@@ -764,7 +766,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
if (rc) {
dev_err(hdev->dev,
"map failed for handle %u, npages: %d, mapped: %d",
"map failed for handle %u, npages: %llu, mapped: %llu",
phys_pg_pack->handle, phys_pg_pack->npages,
mapped_pg_cnt);
goto err;
@@ -985,10 +987,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
struct hl_vm_hash_node *hnode = NULL;
struct hl_userptr *userptr = NULL;
enum vm_type_t *vm_type;
u64 next_vaddr;
u64 next_vaddr, i;
u32 page_size;
bool is_userptr;
int i, rc;
int rc;
/* protect from double entrance */
mutex_lock(&ctx->mem_hash_lock);
@@ -832,7 +832,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
{
struct hl_device *hdev = ctx->hdev;
u64 real_virt_addr;
u64 real_virt_addr, real_phys_addr;
u32 real_page_size, npages;
int i, rc, mapped_cnt = 0;
@@ -857,14 +857,16 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
npages = page_size / real_page_size;
real_virt_addr = virt_addr;
real_phys_addr = phys_addr;
for (i = 0 ; i < npages ; i++) {
rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr,
rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
real_page_size);
if (rc)
goto err;
real_virt_addr += real_page_size;
real_phys_addr += real_page_size;
mapped_cnt++;
}
@@ -154,15 +154,15 @@ struct vbg_session {
* host. Protected by vbg_gdev.session_mutex.
*/
u32 guest_caps;
/** Does this session belong to a root process or a user one? */
bool user_session;
/** VMMDEV_REQUESTOR_* flags */
u32 requestor;
/** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
bool cancel_waiters;
};
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
void vbg_core_exit(struct vbg_dev *gdev);
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor);
void vbg_core_close_session(struct vbg_session *session);
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
@@ -172,12 +172,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
void vbg_linux_mouse_event(struct vbg_dev *gdev);
/* Private (non exported) functions form vboxguest_utils.c */
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
u32 requestor);
void vbg_req_free(void *req, size_t len);
int vbg_req_perform(struct vbg_dev *gdev, void *req);
int vbg_hgcm_call32(
struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
int *vbox_status);
struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
u32 parm_count, int *vbox_status);
#endif
@@ -5,6 +5,7 @@
* Copyright (C) 2006-2016 Oracle Corporation
*/
#include <linux/cred.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
@@ -28,6 +29,23 @@ static DEFINE_MUTEX(vbg_gdev_mutex);
/** Global vbg_gdev pointer used by vbg_get/put_gdev. */
static struct vbg_dev *vbg_gdev;
static u32 vbg_misc_device_requestor(struct inode *inode)
{
u32 requestor = VMMDEV_REQUESTOR_USERMODE |
VMMDEV_REQUESTOR_CON_DONT_KNOW |
VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
if (from_kuid(current_user_ns(), current->cred->uid) == 0)
requestor |= VMMDEV_REQUESTOR_USR_ROOT;
else
requestor |= VMMDEV_REQUESTOR_USR_USER;
if (in_egroup_p(inode->i_gid))
requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
return requestor;
}
static int vbg_misc_device_open(struct inode *inode, struct file *filp)
{
struct vbg_session *session;
@@ -36,7 +54,7 @@ static int vbg_misc_device_open(struct inode *inode, struct file *filp)
/* misc_open sets filp->private_data to our misc device */
gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
session = vbg_core_open_session(gdev, false);
session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode));
if (IS_ERR(session))
return PTR_ERR(session);
@@ -53,7 +71,8 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
gdev = container_of(filp->private_data, struct vbg_dev,
misc_device_user);
session = vbg_core_open_session(gdev, false);
session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
VMMDEV_REQUESTOR_USER_DEVICE);
if (IS_ERR(session))
return PTR_ERR(session);
@@ -115,7 +134,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
if (is_vmmdev_req)
buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
session->requestor);
else
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
@@ -62,7 +62,8 @@ VBG_LOG(vbg_err, pr_err);
VBG_LOG(vbg_debug, pr_debug);
#endif
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
u32 requestor)
{
struct vmmdev_request_header *req;
int order = get_order(PAGE_ALIGN(len));
@@ -78,7 +79,7 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
req->request_type = req_type;
req->rc = VERR_GENERAL_FAILURE;
req->reserved1 = 0;
req->reserved2 = 0;
req->requestor = requestor;
return req;
}
@@ -119,7 +120,7 @@ static bool hgcm_req_done(struct vbg_dev *gdev,
return done;
}
int vbg_hgcm_connect(struct vbg_dev *gdev,
int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
struct vmmdev_hgcm_service_location *loc,
u32 *client_id, int *vbox_status)
{
@@ -127,7 +128,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
int rc;
hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
VMMDEVREQ_HGCM_CONNECT);
VMMDEVREQ_HGCM_CONNECT, requestor);
if (!hgcm_connect)
return -ENOMEM;
@@ -153,13 +154,15 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
}
EXPORT_SYMBOL(vbg_hgcm_connect);
int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
u32 client_id, int *vbox_status)
{
struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
int rc;
hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
VMMDEVREQ_HGCM_DISCONNECT);
VMMDEVREQ_HGCM_DISCONNECT,
requestor);
if (!hgcm_disconnect)
return -ENOMEM;
@@ -593,9 +596,10 @@ static int hgcm_call_copy_back_result(
return 0;
}
int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
u32 parm_count, int *vbox_status)
int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
u32 function, u32 timeout_ms,
struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
int *vbox_status)
{
struct vmmdev_hgcm_call *call;
void **bounce_bufs = NULL;
@@ -615,7 +619,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
goto free_bounce_bufs;
}
call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
if (!call) {
ret = -ENOMEM;
goto free_bounce_bufs;
@@ -647,9 +651,9 @@ EXPORT_SYMBOL(vbg_hgcm_call);
#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
int *vbox_status)
struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
u32 parm_count, int *vbox_status)
{
struct vmmdev_hgcm_function_parameter *parm64 = NULL;
u32 i, size;
@@ -689,7 +693,7 @@ int vbg_hgcm_call32(
goto out_free;
}
ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
parm64, parm_count, vbox_status);
if (ret < 0)
goto out_free;
@@ -9,11 +9,10 @@
#ifndef __VBOX_VERSION_H__
#define __VBOX_VERSION_H__
/* Last synced October 4th 2017 */
#define VBG_VERSION_MAJOR 5
#define VBG_VERSION_MINOR 2
#define VBG_VERSION_MAJOR 6
#define VBG_VERSION_MINOR 0
#define VBG_VERSION_BUILD 0
#define VBG_SVN_REV 68940
#define VBG_VERSION_STRING "5.2.0"
#define VBG_SVN_REV 127566
#define VBG_VERSION_STRING "6.0.0"
#endif
@@ -98,8 +98,8 @@ struct vmmdev_request_header {
s32 rc;
/** Reserved field no.1. MBZ. */
u32 reserved1;
/** Reserved field no.2. MBZ. */
u32 reserved2;
/** IN: Requestor information (VMMDEV_REQUESTOR_*) */
u32 requestor;
};
VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
@@ -247,6 +247,8 @@ struct vmmdev_guest_info {
};
VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
#define VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO BIT(0)
/** struct vmmdev_guestinfo2 - Guest information report, version 2. */
struct vmmdev_guest_info2 {
/** Header. */
@@ -259,7 +261,7 @@ struct vmmdev_guest_info2 {
u32 additions_build;
/** SVN revision. */
u32 additions_revision;
/** Feature mask, currently unused. */
/** Feature mask. */
u32 additions_features;
/**
* The intentional meaning of this field was:
@@ -24,15 +24,17 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
#define vbg_debug pr_debug
#endif
int vbg_hgcm_connect(struct vbg_dev *gdev,
int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
struct vmmdev_hgcm_service_location *loc,
u32 *client_id, int *vbox_status);
int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status);
int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
u32 client_id, int *vbox_status);
int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
u32 parm_count, int *vbox_status);
int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
u32 function, u32 timeout_ms,
struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
int *vbox_status);
/**
* Convert a VirtualBox status code to a standard Linux kernel return value.
@@ -102,6 +102,66 @@ enum vmmdev_request_type {
#define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32
#endif
/* vmmdev_request_header.requestor defines */
/* Requestor user not given. */
#define VMMDEV_REQUESTOR_USR_NOT_GIVEN 0x00000000
/* The kernel driver (vboxguest) is the requestor. */
#define VMMDEV_REQUESTOR_USR_DRV 0x00000001
/* Some other kernel driver is the requestor. */
#define VMMDEV_REQUESTOR_USR_DRV_OTHER 0x00000002
/* The root or a admin user is the requestor. */
#define VMMDEV_REQUESTOR_USR_ROOT 0x00000003
/* Regular joe user is making the request. */
#define VMMDEV_REQUESTOR_USR_USER 0x00000006
/* User classification mask. */
#define VMMDEV_REQUESTOR_USR_MASK 0x00000007
/* Kernel mode request. Note this is 0, check for !USERMODE instead. */
#define VMMDEV_REQUESTOR_KERNEL 0x00000000
/* User mode request. */
#define VMMDEV_REQUESTOR_USERMODE 0x00000008
/* User or kernel mode classification mask. */
#define VMMDEV_REQUESTOR_MODE_MASK 0x00000008
/* Don't know the physical console association of the requestor. */
#define VMMDEV_REQUESTOR_CON_DONT_KNOW 0x00000000
/*
* The request originates with a process that is NOT associated with the
* physical console.
*/
#define VMMDEV_REQUESTOR_CON_NO 0x00000010
/* Requestor process is associated with the physical console. */
#define VMMDEV_REQUESTOR_CON_YES 0x00000020
/* Console classification mask. */
#define VMMDEV_REQUESTOR_CON_MASK 0x00000030
/* Requestor is member of special VirtualBox user group. */
#define VMMDEV_REQUESTOR_GRP_VBOX 0x00000080
/* Note: trust level is for windows guests only, linux always uses not-given */
/* Requestor trust level: Unspecified */
#define VMMDEV_REQUESTOR_TRUST_NOT_GIVEN 0x00000000
/* Requestor trust level: Untrusted (SID S-1-16-0) */
#define VMMDEV_REQUESTOR_TRUST_UNTRUSTED 0x00001000
/* Requestor trust level: Untrusted (SID S-1-16-4096) */
#define VMMDEV_REQUESTOR_TRUST_LOW 0x00002000
/* Requestor trust level: Medium (SID S-1-16-8192) */
#define VMMDEV_REQUESTOR_TRUST_MEDIUM 0x00003000
/* Requestor trust level: Medium plus (SID S-1-16-8448) */
#define VMMDEV_REQUESTOR_TRUST_MEDIUM_PLUS 0x00004000
/* Requestor trust level: High (SID S-1-16-12288) */
#define VMMDEV_REQUESTOR_TRUST_HIGH 0x00005000
/* Requestor trust level: System (SID S-1-16-16384) */
#define VMMDEV_REQUESTOR_TRUST_SYSTEM 0x00006000
/* Requestor trust level >= Protected (SID S-1-16-20480, S-1-16-28672) */
#define VMMDEV_REQUESTOR_TRUST_PROTECTED 0x00007000
/* Requestor trust level mask */
#define VMMDEV_REQUESTOR_TRUST_MASK 0x00007000
/* Requestor is using the less trusted user device node (/dev/vboxuser) */
#define VMMDEV_REQUESTOR_USER_DEVICE 0x00008000
/** HGCM service location types. */
enum vmmdev_hgcm_service_location_type {
VMMDEV_HGCM_LOC_INVALID = 0,