Commit ae7df8f9 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'char-misc-4.14-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver fixes from Greg KH:
 "Here are 4 patches to resolve some char/misc driver issues found these
  past weeks.

  One of them is a mei bugfix and another is a new mei device id. There
  is also a hyper-v fix for a reported issue, and a binder issue fix for
  a problem reported by a few people.

  All of these have been in my tree for a while, I don't know if
  linux-next is really testing much this month. But 0-day is happy with
  them :)"

* tag 'char-misc-4.14-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
  binder: fix use-after-free in binder_transaction()
  Drivers: hv: vmbus: Fix bugs in rescind handling
  mei: me: add gemini lake devices id
  mei: always use domain runtime pm callbacks.
parents 7a263b16 512cf465
...@@ -2582,6 +2582,48 @@ static bool binder_proc_transaction(struct binder_transaction *t, ...@@ -2582,6 +2582,48 @@ static bool binder_proc_transaction(struct binder_transaction *t,
return true; return true;
} }
/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:  struct binder_node for which to get refs
 * @procp: returns @node->proc if the node's proc is still alive
 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: @node with a strong ref and a tmpref taken, or NULL if
 * @node->proc is NULL (i.e. the target proc has died). On success @procp
 * is set to @node->proc (with its tmp_ref elevated); on failure @error is
 * set to BR_DEAD_REPLY and @procp is left untouched.
 */
static struct binder_node *binder_get_node_refs_for_txn(
struct binder_node *node,
struct binder_proc **procp,
uint32_t *error)
{
struct binder_node *target_node = NULL;
/* node inner lock protects node->proc and the refcounts below */
binder_node_inner_lock(node);
if (node->proc) {
target_node = node;
/* strong ref: keeps node alive while the txn is in flight */
binder_inc_node_nilocked(node, 1, 0, NULL);
/* tmpref: survives even if user-space drops the strong ref */
binder_inc_node_tmpref_ilocked(node);
/* tmpref on the owning proc so it cannot be torn down mid-txn */
node->proc->tmp_ref++;
*procp = node->proc;
} else
*error = BR_DEAD_REPLY;
binder_node_inner_unlock(node);
return target_node;
}
static void binder_transaction(struct binder_proc *proc, static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread, struct binder_thread *thread,
struct binder_transaction_data *tr, int reply, struct binder_transaction_data *tr, int reply,
...@@ -2685,43 +2727,35 @@ static void binder_transaction(struct binder_proc *proc, ...@@ -2685,43 +2727,35 @@ static void binder_transaction(struct binder_proc *proc,
ref = binder_get_ref_olocked(proc, tr->target.handle, ref = binder_get_ref_olocked(proc, tr->target.handle,
true); true);
if (ref) { if (ref) {
binder_inc_node(ref->node, 1, 0, NULL); target_node = binder_get_node_refs_for_txn(
target_node = ref->node; ref->node, &target_proc,
} &return_error);
binder_proc_unlock(proc); } else {
if (target_node == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n", binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid); proc->pid, thread->pid);
return_error = BR_FAILED_REPLY; return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_invalid_target_handle;
} }
binder_proc_unlock(proc);
} else { } else {
mutex_lock(&context->context_mgr_node_lock); mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node; target_node = context->binder_context_mgr_node;
if (target_node == NULL) { if (target_node)
target_node = binder_get_node_refs_for_txn(
target_node, &target_proc,
&return_error);
else
return_error = BR_DEAD_REPLY; return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock); mutex_unlock(&context->context_mgr_node_lock);
return_error_line = __LINE__;
goto err_no_context_mgr_node;
} }
binder_inc_node(target_node, 1, 0, NULL); if (!target_node) {
mutex_unlock(&context->context_mgr_node_lock); /*
} * return_error is set above
e->to_node = target_node->debug_id; */
binder_node_lock(target_node); return_error_param = -EINVAL;
target_proc = target_node->proc;
if (target_proc == NULL) {
binder_node_unlock(target_node);
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__; return_error_line = __LINE__;
goto err_dead_binder; goto err_dead_binder;
} }
binder_inner_proc_lock(target_proc); e->to_node = target_node->debug_id;
target_proc->tmp_ref++;
binder_inner_proc_unlock(target_proc);
binder_node_unlock(target_node);
if (security_binder_transaction(proc->tsk, if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) { target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY; return_error = BR_FAILED_REPLY;
...@@ -3071,6 +3105,8 @@ static void binder_transaction(struct binder_proc *proc, ...@@ -3071,6 +3105,8 @@ static void binder_transaction(struct binder_proc *proc,
if (target_thread) if (target_thread)
binder_thread_dec_tmpref(target_thread); binder_thread_dec_tmpref(target_thread);
binder_proc_dec_tmpref(target_proc); binder_proc_dec_tmpref(target_proc);
if (target_node)
binder_dec_node_tmpref(target_node);
/* /*
* write barrier to synchronize with initialization * write barrier to synchronize with initialization
* of log entry * of log entry
...@@ -3090,6 +3126,8 @@ static void binder_transaction(struct binder_proc *proc, ...@@ -3090,6 +3126,8 @@ static void binder_transaction(struct binder_proc *proc,
err_copy_data_failed: err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer); trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp); binder_transaction_buffer_release(target_proc, t->buffer, offp);
if (target_node)
binder_dec_node_tmpref(target_node);
target_node = NULL; target_node = NULL;
t->buffer->transaction = NULL; t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer); binder_alloc_free_buf(&target_proc->alloc, t->buffer);
...@@ -3104,13 +3142,14 @@ static void binder_transaction(struct binder_proc *proc, ...@@ -3104,13 +3142,14 @@ static void binder_transaction(struct binder_proc *proc,
err_empty_call_stack: err_empty_call_stack:
err_dead_binder: err_dead_binder:
err_invalid_target_handle: err_invalid_target_handle:
err_no_context_mgr_node:
if (target_thread) if (target_thread)
binder_thread_dec_tmpref(target_thread); binder_thread_dec_tmpref(target_thread);
if (target_proc) if (target_proc)
binder_proc_dec_tmpref(target_proc); binder_proc_dec_tmpref(target_proc);
if (target_node) if (target_node) {
binder_dec_node(target_node, 1, 0); binder_dec_node(target_node, 1, 0);
binder_dec_node_tmpref(target_node);
}
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
......
...@@ -640,6 +640,7 @@ void vmbus_close(struct vmbus_channel *channel) ...@@ -640,6 +640,7 @@ void vmbus_close(struct vmbus_channel *channel)
*/ */
return; return;
} }
mutex_lock(&vmbus_connection.channel_mutex);
/* /*
* Close all the sub-channels first and then close the * Close all the sub-channels first and then close the
* primary channel. * primary channel.
...@@ -648,16 +649,15 @@ void vmbus_close(struct vmbus_channel *channel) ...@@ -648,16 +649,15 @@ void vmbus_close(struct vmbus_channel *channel)
cur_channel = list_entry(cur, struct vmbus_channel, sc_list); cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
vmbus_close_internal(cur_channel); vmbus_close_internal(cur_channel);
if (cur_channel->rescind) { if (cur_channel->rescind) {
mutex_lock(&vmbus_connection.channel_mutex); hv_process_channel_removal(
hv_process_channel_removal(cur_channel,
cur_channel->offermsg.child_relid); cur_channel->offermsg.child_relid);
mutex_unlock(&vmbus_connection.channel_mutex);
} }
} }
/* /*
* Now close the primary. * Now close the primary.
*/ */
vmbus_close_internal(channel); vmbus_close_internal(channel);
mutex_unlock(&vmbus_connection.channel_mutex);
} }
EXPORT_SYMBOL_GPL(vmbus_close); EXPORT_SYMBOL_GPL(vmbus_close);
......
...@@ -159,7 +159,7 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel) ...@@ -159,7 +159,7 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
channel->rescind = true;
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
msglistentry) { msglistentry) {
...@@ -381,14 +381,21 @@ static void vmbus_release_relid(u32 relid) ...@@ -381,14 +381,21 @@ static void vmbus_release_relid(u32 relid)
true); true);
} }
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) void hv_process_channel_removal(u32 relid)
{ {
unsigned long flags; unsigned long flags;
struct vmbus_channel *primary_channel; struct vmbus_channel *primary_channel, *channel;
BUG_ON(!channel->rescind);
BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
/*
* Make sure channel is valid as we may have raced.
*/
channel = relid2channel(relid);
if (!channel)
return;
BUG_ON(!channel->rescind);
if (channel->target_cpu != get_cpu()) { if (channel->target_cpu != get_cpu()) {
put_cpu(); put_cpu();
smp_call_function_single(channel->target_cpu, smp_call_function_single(channel->target_cpu,
...@@ -515,6 +522,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) ...@@ -515,6 +522,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
if (!fnew) { if (!fnew) {
if (channel->sc_creation_callback != NULL) if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel); channel->sc_creation_callback(newchannel);
newchannel->probe_done = true;
return; return;
} }
...@@ -834,7 +842,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) ...@@ -834,7 +842,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{ {
struct vmbus_channel_rescind_offer *rescind; struct vmbus_channel_rescind_offer *rescind;
struct vmbus_channel *channel; struct vmbus_channel *channel;
unsigned long flags;
struct device *dev; struct device *dev;
rescind = (struct vmbus_channel_rescind_offer *)hdr; rescind = (struct vmbus_channel_rescind_offer *)hdr;
...@@ -873,16 +880,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) ...@@ -873,16 +880,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
return; return;
} }
spin_lock_irqsave(&channel->lock, flags);
channel->rescind = true;
spin_unlock_irqrestore(&channel->lock, flags);
/*
* Now that we have posted the rescind state, perform
* rescind related cleanup.
*/
vmbus_rescind_cleanup(channel);
/* /*
* Now wait for offer handling to complete. * Now wait for offer handling to complete.
*/ */
...@@ -901,6 +898,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) ...@@ -901,6 +898,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
if (channel->device_obj) { if (channel->device_obj) {
if (channel->chn_rescind_callback) { if (channel->chn_rescind_callback) {
channel->chn_rescind_callback(channel); channel->chn_rescind_callback(channel);
vmbus_rescind_cleanup(channel);
return; return;
} }
/* /*
...@@ -909,6 +907,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) ...@@ -909,6 +907,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
*/ */
dev = get_device(&channel->device_obj->device); dev = get_device(&channel->device_obj->device);
if (dev) { if (dev) {
vmbus_rescind_cleanup(channel);
vmbus_device_unregister(channel->device_obj); vmbus_device_unregister(channel->device_obj);
put_device(dev); put_device(dev);
} }
...@@ -921,16 +920,16 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) ...@@ -921,16 +920,16 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
* 1. Close all sub-channels first * 1. Close all sub-channels first
* 2. Then close the primary channel. * 2. Then close the primary channel.
*/ */
mutex_lock(&vmbus_connection.channel_mutex);
vmbus_rescind_cleanup(channel);
if (channel->state == CHANNEL_OPEN_STATE) { if (channel->state == CHANNEL_OPEN_STATE) {
/* /*
* The channel is currently not open; * The channel is currently not open;
* it is safe for us to cleanup the channel. * it is safe for us to cleanup the channel.
*/ */
mutex_lock(&vmbus_connection.channel_mutex); hv_process_channel_removal(rescind->child_relid);
hv_process_channel_removal(channel,
channel->offermsg.child_relid);
mutex_unlock(&vmbus_connection.channel_mutex);
} }
mutex_unlock(&vmbus_connection.channel_mutex);
} }
} }
......
...@@ -768,8 +768,7 @@ static void vmbus_device_release(struct device *device) ...@@ -768,8 +768,7 @@ static void vmbus_device_release(struct device *device)
struct vmbus_channel *channel = hv_dev->channel; struct vmbus_channel *channel = hv_dev->channel;
mutex_lock(&vmbus_connection.channel_mutex); mutex_lock(&vmbus_connection.channel_mutex);
hv_process_channel_removal(channel, hv_process_channel_removal(channel->offermsg.child_relid);
channel->offermsg.child_relid);
mutex_unlock(&vmbus_connection.channel_mutex); mutex_unlock(&vmbus_connection.channel_mutex);
kfree(hv_dev); kfree(hv_dev);
......
...@@ -127,6 +127,8 @@ ...@@ -127,6 +127,8 @@
#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
......
...@@ -93,6 +93,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { ...@@ -93,6 +93,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
...@@ -226,11 +228,14 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -226,11 +228,14 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
/* /*
* For not wake-able HW runtime pm framework * ME maps runtime suspend/resume to D0i states,
* can't be used on pci device level. * hence we need to go around native PCI runtime service which
* Use domain runtime pm callbacks instead. * eventually brings the device into D3cold/hot state,
* but the mei device cannot wake up from D3 unlike from D0i3.
* To get around the PCI device native runtime pm,
* ME uses runtime pm domain handlers which take precedence
* over the driver's pm handlers.
*/ */
if (!pci_dev_run_wake(pdev))
mei_me_set_pm_domain(dev); mei_me_set_pm_domain(dev);
if (mei_pg_is_enabled(dev)) if (mei_pg_is_enabled(dev))
...@@ -271,7 +276,6 @@ static void mei_me_shutdown(struct pci_dev *pdev) ...@@ -271,7 +276,6 @@ static void mei_me_shutdown(struct pci_dev *pdev)
dev_dbg(&pdev->dev, "shutdown\n"); dev_dbg(&pdev->dev, "shutdown\n");
mei_stop(dev); mei_stop(dev);
if (!pci_dev_run_wake(pdev))
mei_me_unset_pm_domain(dev); mei_me_unset_pm_domain(dev);
mei_disable_interrupts(dev); mei_disable_interrupts(dev);
...@@ -300,7 +304,6 @@ static void mei_me_remove(struct pci_dev *pdev) ...@@ -300,7 +304,6 @@ static void mei_me_remove(struct pci_dev *pdev)
dev_dbg(&pdev->dev, "stop\n"); dev_dbg(&pdev->dev, "stop\n");
mei_stop(dev); mei_stop(dev);
if (!pci_dev_run_wake(pdev))
mei_me_unset_pm_domain(dev); mei_me_unset_pm_domain(dev);
mei_disable_interrupts(dev); mei_disable_interrupts(dev);
......
...@@ -144,11 +144,13 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -144,11 +144,13 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
/* /*
* For not wake-able HW runtime pm framework * TXE maps runtime suspend/resume to own power gating states,
* can't be used on pci device level. * hence we need to go around native PCI runtime service which
* Use domain runtime pm callbacks instead. * eventually brings the device into D3cold/hot state.
* But the TXE device cannot wake up from D3 unlike from own
* power gating. To get around PCI device native runtime pm,
* TXE uses runtime pm domain handlers which take precedence.
*/ */
if (!pci_dev_run_wake(pdev))
mei_txe_set_pm_domain(dev); mei_txe_set_pm_domain(dev);
pm_runtime_put_noidle(&pdev->dev); pm_runtime_put_noidle(&pdev->dev);
...@@ -186,7 +188,6 @@ static void mei_txe_shutdown(struct pci_dev *pdev) ...@@ -186,7 +188,6 @@ static void mei_txe_shutdown(struct pci_dev *pdev)
dev_dbg(&pdev->dev, "shutdown\n"); dev_dbg(&pdev->dev, "shutdown\n");
mei_stop(dev); mei_stop(dev);
if (!pci_dev_run_wake(pdev))
mei_txe_unset_pm_domain(dev); mei_txe_unset_pm_domain(dev);
mei_disable_interrupts(dev); mei_disable_interrupts(dev);
...@@ -215,7 +216,6 @@ static void mei_txe_remove(struct pci_dev *pdev) ...@@ -215,7 +216,6 @@ static void mei_txe_remove(struct pci_dev *pdev)
mei_stop(dev); mei_stop(dev);
if (!pci_dev_run_wake(pdev))
mei_txe_unset_pm_domain(dev); mei_txe_unset_pm_domain(dev);
mei_disable_interrupts(dev); mei_disable_interrupts(dev);
...@@ -318,15 +318,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device) ...@@ -318,15 +318,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
else else
ret = -EAGAIN; ret = -EAGAIN;
/* /* keep irq on we are staying in D0 */
* If everything is okay we're about to enter PCI low
* power state (D3) therefor we need to disable the
* interrupts towards host.
* However if device is not wakeable we do not enter
* D-low state and we need to keep the interrupt kicking
*/
if (!ret && pci_dev_run_wake(pdev))
mei_disable_interrupts(dev);
dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
......
...@@ -1403,7 +1403,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, ...@@ -1403,7 +1403,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
const int *srv_version, int srv_vercnt, const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version); int *nego_fw_version, int *nego_srv_version);
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); void hv_process_channel_removal(u32 relid);
void vmbus_setevent(struct vmbus_channel *channel); void vmbus_setevent(struct vmbus_channel *channel);
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment