Commit d8a33273 authored by Linus Torvalds

Merge tag 'char-misc-4.17-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver fixes from Greg KH:
 "Here are some small char and misc driver fixes for 4.17-rc3

  A variety of small things that have fallen out after 4.17-rc1 was out.
  Some vboxguest fixes for systems with lots of memory, amba bus fixes,
  some MAINTAINERS updates, uio_hv_generic driver fixes, and a few other
  minor things that resolve problems that people reported.

  The amba bus fixes took two tries to get right: the first time I
  messed up and applied the patches in the wrong order, hence the revert
  and the later re-addition with the correct fix. Sorry about that.

  All of these have been in linux-next with no reported issues"

* tag 'char-misc-4.17-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
  ARM: amba: Fix race condition with driver_override
  ARM: amba: Make driver_override output consistent with other buses
  Revert "ARM: amba: Fix race condition with driver_override"
  ARM: amba: Don't read past the end of sysfs "driver_override" buffer
  ARM: amba: Fix race condition with driver_override
  virt: vbox: Log an error when we fail to get the host version
  virt: vbox: Use __get_free_pages instead of kmalloc for DMA32 memory
  virt: vbox: Add vbg_req_free() helper function
  virt: vbox: Move declarations of vboxguest private functions to private header
  slimbus: Fix out-of-bounds access in slim_slicesize()
  MAINTAINERS: add dri-devel&linaro-mm for Android ION
  fpga-manager: altera-ps-spi: preserve nCONFIG state
  MAINTAINERS: update my email address
  uio_hv_generic: fix subchannel ring mmap
  uio_hv_generic: use correct channel in isr
  uio_hv_generic: make ring buffer attribute for primary channel
  uio_hv_generic: set size of ring buffer attribute
  ANDROID: binder: prevent transactions into own process.
parents ee3748be 6a7228d9
@@ -905,6 +905,8 @@ ANDROID ION DRIVER
M: Laura Abbott <labbott@redhat.com>
M: Sumit Semwal <sumit.semwal@linaro.org>
L: devel@driverdev.osuosl.org
L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
S: Supported
F: drivers/staging/android/ion
F: drivers/staging/android/uapi/ion.h
@@ -13953,7 +13955,7 @@ THUNDERBOLT DRIVER
M: Andreas Noever <andreas.noever@gmail.com>
M: Michael Jamet <michael.jamet@intel.com>
M: Mika Westerberg <mika.westerberg@linux.intel.com>
M: Yehezkel Bernat <yehezkel.bernat@intel.com>
M: Yehezkel Bernat <YehezkelShB@gmail.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
S: Maintained
F: Documentation/admin-guide/thunderbolt.rst
@@ -13963,7 +13965,7 @@ F: include/linux/thunderbolt.h
THUNDERBOLT NETWORK DRIVER
M: Michael Jamet <michael.jamet@intel.com>
M: Mika Westerberg <mika.westerberg@linux.intel.com>
M: Yehezkel Bernat <yehezkel.bernat@intel.com>
M: Yehezkel Bernat <YehezkelShB@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/thunderbolt.c
@@ -69,11 +69,12 @@ static ssize_t driver_override_show(struct device *_dev,
struct device_attribute *attr, char *buf)
{
struct amba_device *dev = to_amba_device(_dev);
ssize_t len;
if (!dev->driver_override)
return 0;
return sprintf(buf, "%s\n", dev->driver_override);
device_lock(_dev);
len = sprintf(buf, "%s\n", dev->driver_override);
device_unlock(_dev);
return len;
}
static ssize_t driver_override_store(struct device *_dev,
@@ -81,9 +82,10 @@ static ssize_t driver_override_store(struct device *_dev,
const char *buf, size_t count)
{
struct amba_device *dev = to_amba_device(_dev);
char *driver_override, *old = dev->driver_override, *cp;
char *driver_override, *old, *cp;
if (count > PATH_MAX)
/* We need to keep extra room for a newline */
if (count >= (PAGE_SIZE - 1))
return -EINVAL;
driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -94,12 +96,15 @@ static ssize_t driver_override_store(struct device *_dev,
if (cp)
*cp = '\0';
device_lock(_dev);
old = dev->driver_override;
if (strlen(driver_override)) {
dev->driver_override = driver_override;
} else {
kfree(driver_override);
dev->driver_override = NULL;
}
device_unlock(_dev);
kfree(old);
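The race being fixed above is between a sysfs reader printing dev->driver_override and a concurrent writer freeing the old string. The fix publishes and reads the pointer only under device_lock() and frees the previous value after dropping the lock. As a condensed sketch, this is how the show/store pair reads once the hunks above are applied (attribute declaration and the surrounding bus code omitted; the trailing "return count" is the usual sysfs store convention rather than a line visible in this excerpt):

static ssize_t driver_override_show(struct device *_dev,
                                    struct device_attribute *attr, char *buf)
{
        struct amba_device *dev = to_amba_device(_dev);
        ssize_t len;

        /* Hold the device lock so the string cannot be freed while printing. */
        device_lock(_dev);
        len = sprintf(buf, "%s\n", dev->driver_override);
        device_unlock(_dev);
        return len;
}

static ssize_t driver_override_store(struct device *_dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct amba_device *dev = to_amba_device(_dev);
        char *driver_override, *old, *cp;

        /* We need to keep extra room for a newline. */
        if (count >= (PAGE_SIZE - 1))
                return -EINVAL;

        driver_override = kstrndup(buf, count, GFP_KERNEL);
        if (!driver_override)
                return -ENOMEM;

        cp = strchr(driver_override, '\n');
        if (cp)
                *cp = '\0';

        /* Swap the pointer under the lock; free the old value after dropping it. */
        device_lock(_dev);
        old = dev->driver_override;
        if (strlen(driver_override)) {
                dev->driver_override = driver_override;
        } else {
                kfree(driver_override);
                dev->driver_override = NULL;
        }
        device_unlock(_dev);

        kfree(old);

        return count;
}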
@@ -2839,6 +2839,14 @@ static void binder_transaction(struct binder_proc *proc,
else
return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
if (target_node && target_proc == proc) {
binder_user_error("%d:%d got transaction to context manager from process owning it\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_invalid_target_handle;
}
}
if (!target_node) {
/*
@@ -249,7 +249,7 @@ static int altera_ps_probe(struct spi_device *spi)
conf->data = of_id->data;
conf->spi = spi;
conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_HIGH);
conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW);
if (IS_ERR(conf->config)) {
dev_err(&spi->dev, "Failed to get config gpio: %ld\n",
PTR_ERR(conf->config));
@@ -183,7 +183,7 @@ static u16 slim_slicesize(int code)
0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
};
clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
code = clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
return sizetocode[code - 1];
}
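The slimbus bug is subtle: clamp() evaluates to the bounded value and does not modify its argument, so calling it without assigning the result left an out-of-range code able to index past sizetocode[]. A small self-contained illustration of the same pattern (userspace, with a simplified stand-in for the kernel's type-checked clamp() macro):

#include <stdio.h>

/* Simplified stand-in for the kernel's clamp(); illustration only. */
#define clamp(val, lo, hi) ((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

static const unsigned sizetocode[16] = {
        0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
};

static unsigned slicesize(int code)
{
        /* Buggy: the clamped result is discarded, 'code' keeps its old value. */
        /* clamp(code, 1, 16); */

        /* Fixed: assign the result back before using it as an index. */
        code = clamp(code, 1, 16);
        return sizetocode[code - 1];
}

int main(void)
{
        printf("%u\n", slicesize(40));  /* clamped to 16, prints 7 */
        return 0;
}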
@@ -19,7 +19,7 @@
* # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
* > /sys/bus/vmbus/drivers/uio_hv_generic/bind
*/
#define DEBUG 1
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
@@ -94,10 +94,11 @@ hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
*/
static void hv_uio_channel_cb(void *context)
{
struct hv_uio_private_data *pdata = context;
struct hv_device *dev = pdata->device;
struct vmbus_channel *chan = context;
struct hv_device *hv_dev = chan->device_obj;
struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
dev->channel->inbound.ring_buffer->interrupt_mask = 1;
chan->inbound.ring_buffer->interrupt_mask = 1;
virt_mb();
uio_event_notify(&pdata->info);
@@ -121,78 +122,46 @@ static void hv_uio_rescind(struct vmbus_channel *channel)
uio_event_notify(&pdata->info);
}
/*
* Handle fault when looking for sub channel ring buffer
* Subchannel ring buffer is same as resource 0 which is main ring buffer
* This is derived from uio_vma_fault
/* Sysfs API to allow mmap of the ring buffers
* The ring buffer is allocated as contiguous memory by vmbus_open
*/
static int hv_uio_vma_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
void *ring_buffer = vma->vm_private_data;
struct page *page;
void *addr;
addr = ring_buffer + (vmf->pgoff << PAGE_SHIFT);
page = virt_to_page(addr);
get_page(page);
vmf->page = page;
return 0;
}
static const struct vm_operations_struct hv_uio_vm_ops = {
.fault = hv_uio_vma_fault,
};
/* Sysfs API to allow mmap of the ring buffers */
static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct vmbus_channel *channel
= container_of(kobj, struct vmbus_channel, kobj);
unsigned long requested_pages, actual_pages;
if (vma->vm_end < vma->vm_start)
return -EINVAL;
/* only allow 0 for now */
if (vma->vm_pgoff > 0)
return -EINVAL;
struct hv_device *dev = channel->primary_channel->device_obj;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
requested_pages = vma_pages(vma);
actual_pages = 2 * HV_RING_SIZE;
if (requested_pages > actual_pages)
return -EINVAL;
dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n",
q_idx, vma_pages(vma), vma->vm_pgoff);
vma->vm_private_data = channel->ringbuffer_pages;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &hv_uio_vm_ops;
return 0;
return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages),
channel->ringbuffer_pagecount << PAGE_SHIFT);
}
static struct bin_attribute ring_buffer_bin_attr __ro_after_init = {
static const struct bin_attribute ring_buffer_bin_attr = {
.attr = {
.name = "ring",
.mode = 0600,
/* size is set at init time */
},
.size = 2 * HV_RING_SIZE * PAGE_SIZE,
.mmap = hv_uio_ring_mmap,
};
/* Callback from VMBUS subystem when new channel created. */
/* Callback from VMBUS subsystem when new channel created. */
static void
hv_uio_new_channel(struct vmbus_channel *new_sc)
{
struct hv_device *hv_dev = new_sc->primary_channel->device_obj;
struct device *device = &hv_dev->device;
struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
const size_t ring_bytes = HV_RING_SIZE * PAGE_SIZE;
int ret;
/* Create host communication ring */
ret = vmbus_open(new_sc, ring_bytes, ring_bytes, NULL, 0,
hv_uio_channel_cb, pdata);
hv_uio_channel_cb, new_sc);
if (ret) {
dev_err(device, "vmbus_open subchannel failed: %d\n", ret);
return;
@@ -234,7 +203,7 @@ hv_uio_probe(struct hv_device *dev,
ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE,
HV_RING_SIZE * PAGE_SIZE, NULL, 0,
hv_uio_channel_cb, pdata);
hv_uio_channel_cb, dev->channel);
if (ret)
goto fail;
@@ -326,6 +295,11 @@ hv_uio_probe(struct hv_device *dev,
vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
ret = sysfs_create_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
if (ret)
dev_notice(&dev->device,
"sysfs create ring bin file failed; %d\n", ret);
hv_set_drvdata(dev, pdata);
return 0;
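The subchannel mmap fix drops the per-page fault handler entirely: vmbus_open() allocates the ring buffer as physically contiguous memory, so the whole region can be mapped in one call with vm_iomap_memory(). A condensed sketch of the resulting mmap handler from the hunks above (driver glue and the bin_attribute registration omitted):

static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
                            struct bin_attribute *attr,
                            struct vm_area_struct *vma)
{
        struct vmbus_channel *channel
                = container_of(kobj, struct vmbus_channel, kobj);
        struct hv_device *dev = channel->primary_channel->device_obj;
        u16 q_idx = channel->offermsg.offer.sub_channel_index;

        dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n",
                q_idx, vma_pages(vma), vma->vm_pgoff);

        /* The ring buffer is contiguous, so map it with a single call. */
        return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages),
                               channel->ringbuffer_pagecount << PAGE_SHIFT);
}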
@@ -114,7 +114,7 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
}
out:
kfree(req);
vbg_req_free(req, sizeof(*req));
kfree(pages);
}
@@ -144,7 +144,7 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
rc = vbg_req_perform(gdev, req);
kfree(req);
vbg_req_free(req, sizeof(*req));
if (rc < 0) {
vbg_err("%s error: %d\n", __func__, rc);
@@ -214,8 +214,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
ret = vbg_status_code_to_errno(rc);
out_free:
kfree(req2);
kfree(req1);
vbg_req_free(req2, sizeof(*req2));
vbg_req_free(req1, sizeof(*req1));
return ret;
}
@@ -245,7 +245,7 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
rc = VINF_SUCCESS;
kfree(req);
vbg_req_free(req, sizeof(*req));
return vbg_status_code_to_errno(rc);
}
@@ -431,7 +431,7 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
rc = vbg_req_perform(gdev, req);
do_div(req->interval_ns, 1000000); /* ns -> ms */
gdev->heartbeat_interval_ms = req->interval_ns;
kfree(req);
vbg_req_free(req, sizeof(*req));
return vbg_status_code_to_errno(rc);
}
@@ -454,12 +454,6 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
if (ret < 0)
return ret;
/*
* Preallocate the request to use it from the timer callback because:
* 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
* and the timer callback runs at DISPATCH_LEVEL;
* 2) avoid repeated allocations.
*/
gdev->guest_heartbeat_req = vbg_req_alloc(
sizeof(*gdev->guest_heartbeat_req),
VMMDEVREQ_GUEST_HEARTBEAT);
@@ -481,8 +475,8 @@ static void vbg_heartbeat_exit(struct vbg_dev *gdev)
{
del_timer_sync(&gdev->heartbeat_timer);
vbg_heartbeat_host_config(gdev, false);
kfree(gdev->guest_heartbeat_req);
vbg_req_free(gdev->guest_heartbeat_req,
sizeof(*gdev->guest_heartbeat_req));
}
/**
@@ -543,7 +537,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
if (rc < 0)
vbg_err("%s error, rc: %d\n", __func__, rc);
kfree(req);
vbg_req_free(req, sizeof(*req));
return vbg_status_code_to_errno(rc);
}
@@ -617,7 +611,7 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
out:
mutex_unlock(&gdev->session_mutex);
kfree(req);
vbg_req_free(req, sizeof(*req));
return ret;
}
@@ -642,7 +636,7 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
if (rc < 0)
vbg_err("%s error, rc: %d\n", __func__, rc);
kfree(req);
vbg_req_free(req, sizeof(*req));
return vbg_status_code_to_errno(rc);
}
@@ -712,7 +706,7 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
out:
mutex_unlock(&gdev->session_mutex);
kfree(req);
vbg_req_free(req, sizeof(*req));
return ret;
}
@@ -733,8 +727,10 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
rc = vbg_req_perform(gdev, req);
ret = vbg_status_code_to_errno(rc);
if (ret)
if (ret) {
vbg_err("%s error: %d\n", __func__, rc);
goto out;
}
snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
req->major, req->minor, req->build, req->revision);
@@ -749,7 +745,7 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
}
out:
kfree(req);
vbg_req_free(req, sizeof(*req));
return ret;
}
@@ -847,11 +843,16 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
return 0;
err_free_reqs:
kfree(gdev->mouse_status_req);
kfree(gdev->ack_events_req);
kfree(gdev->cancel_req);
kfree(gdev->mem_balloon.change_req);
kfree(gdev->mem_balloon.get_req);
vbg_req_free(gdev->mouse_status_req,
sizeof(*gdev->mouse_status_req));
vbg_req_free(gdev->ack_events_req,
sizeof(*gdev->ack_events_req));
vbg_req_free(gdev->cancel_req,
sizeof(*gdev->cancel_req));
vbg_req_free(gdev->mem_balloon.change_req,
sizeof(*gdev->mem_balloon.change_req));
vbg_req_free(gdev->mem_balloon.get_req,
sizeof(*gdev->mem_balloon.get_req));
return ret;
}
@@ -872,11 +873,16 @@ void vbg_core_exit(struct vbg_dev *gdev)
vbg_reset_host_capabilities(gdev);
vbg_core_set_mouse_status(gdev, 0);
kfree(gdev->mouse_status_req);
kfree(gdev->ack_events_req);
kfree(gdev->cancel_req);
kfree(gdev->mem_balloon.change_req);
kfree(gdev->mem_balloon.get_req);
vbg_req_free(gdev->mouse_status_req,
sizeof(*gdev->mouse_status_req));
vbg_req_free(gdev->ack_events_req,
sizeof(*gdev->ack_events_req));
vbg_req_free(gdev->cancel_req,
sizeof(*gdev->cancel_req));
vbg_req_free(gdev->mem_balloon.change_req,
sizeof(*gdev->mem_balloon.change_req));
vbg_req_free(gdev->mem_balloon.get_req,
sizeof(*gdev->mem_balloon.get_req));
}
/**
@@ -1415,7 +1421,7 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
req->flags = dump->u.in.flags;
dump->hdr.rc = vbg_req_perform(gdev, req);
kfree(req);
vbg_req_free(req, sizeof(*req));
return 0;
}
@@ -1513,7 +1519,7 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
if (rc < 0)
vbg_err("%s error, rc: %d\n", __func__, rc);
kfree(req);
vbg_req_free(req, sizeof(*req));
return vbg_status_code_to_errno(rc);
}
@@ -171,4 +171,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
void vbg_linux_mouse_event(struct vbg_dev *gdev);
/* Private (non exported) functions form vboxguest_utils.c */
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
void vbg_req_free(void *req, size_t len);
int vbg_req_perform(struct vbg_dev *gdev, void *req);
int vbg_hgcm_call32(
struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
int *vbox_status);
#endif
@@ -87,6 +87,7 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
struct vbg_session *session = filp->private_data;
size_t returned_size, size;
struct vbg_ioctl_hdr hdr;
bool is_vmmdev_req;
int ret = 0;
void *buf;
@@ -106,8 +107,17 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
if (size > SZ_16M)
return -E2BIG;
/* __GFP_DMA32 because IOCTL_VMMDEV_REQUEST passes this to the host */
buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32);
/*
* IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid
* the need for a bounce-buffer and another copy later on.
*/
is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) ||
req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
if (is_vmmdev_req)
buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
else
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -132,7 +142,10 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
ret = -EFAULT;
out:
kfree(buf);
if (is_vmmdev_req)
vbg_req_free(buf, size);
else
kfree(buf);
return ret;
}
@@ -65,8 +65,9 @@ VBG_LOG(vbg_debug, pr_debug);
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
{
struct vmmdev_request_header *req;
int order = get_order(PAGE_ALIGN(len));
req = kmalloc(len, GFP_KERNEL | __GFP_DMA32);
req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
if (!req)
return NULL;
@@ -82,6 +83,14 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
return req;
}
void vbg_req_free(void *req, size_t len)
{
if (!req)
return;
free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
}
/* Note this function returns a VBox status code, not a negative errno!! */
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
@@ -137,7 +146,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
rc = hgcm_connect->header.result;
}
kfree(hgcm_connect);
vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));
*vbox_status = rc;
return 0;
@@ -166,7 +175,7 @@ int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
if (rc >= 0)
rc = hgcm_disconnect->header.result;
kfree(hgcm_disconnect);
vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));
*vbox_status = rc;
return 0;
@@ -623,7 +632,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
}
if (!leak_it)
kfree(call);
vbg_req_free(call, size);
free_bounce_bufs:
if (bounce_bufs) {
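The vboxguest requests are handed to the host by physical address and, as the removed ioctl comment notes, must sit below 4G to avoid bounce buffers. The allocator therefore now takes whole pages from the DMA32 zone, and every former kfree(req) becomes vbg_req_free(req, size) so the matching page order can be recomputed on free. A condensed sketch of the helper pair from the hunks above (the request-header initialization that vbg_req_alloc also performs is elided):

void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
{
        struct vmmdev_request_header *req;
        int order = get_order(PAGE_ALIGN(len));

        /* Physically contiguous pages below 4GiB, as required by the host. */
        req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
        if (!req)
                return NULL;

        /* ... initialize the request header as in the original function ... */

        return req;
}

void vbg_req_free(void *req, size_t len)
{
        if (!req)
                return;

        /* Recompute the same order that was used for the allocation. */
        free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
}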
@@ -24,24 +24,6 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
#define vbg_debug pr_debug
#endif
/**
* Allocate memory for generic request and initialize the request header.
*
* Return: the allocated memory
* @len: Size of memory block required for the request.
* @req_type: The generic request type.
*/
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
/**
* Perform a generic request.
*
* Return: VBox status code
* @gdev: The Guest extension device.
* @req: Pointer to the request structure.
*/
int vbg_req_perform(struct vbg_dev *gdev, void *req);
int vbg_hgcm_connect(struct vbg_dev *gdev,
struct vmmdev_hgcm_service_location *loc,
u32 *client_id, int *vbox_status);
@@ -52,11 +34,6 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
u32 parm_count, int *vbox_status);
int vbg_hgcm_call32(
struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
int *vbox_status);
/**
* Convert a VirtualBox status code to a standard Linux kernel return value.
* Return: 0 or negative errno value.