Commit bed429e1 authored by Jakub Kicinski, committed by Kalle Valo

mt7601u: fix dma from stack address

DMA to variables located on the stack is a bad idea.
For simplicity and to avoid frequent allocations create
a buffer inside the device structure.  Protect this
buffer with vendor_req_mutex.  Don't protect vendor
requests which don't use this buffer.
Signed-off-by: Jakub Kicinski <kubakici@wp.pl>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
parent 7845af35
...@@ -146,7 +146,7 @@ enum { ...@@ -146,7 +146,7 @@ enum {
* @rx_lock: protects @rx_q. * @rx_lock: protects @rx_q.
* @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi. * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi.
* @mutex: ensures exclusive access from mac80211 callbacks. * @mutex: ensures exclusive access from mac80211 callbacks.
* @vendor_req_mutex: ensures atomicity of vendor requests. * @vendor_req_mutex: protects @vend_buf, ensures atomicity of split writes.
* @reg_atomic_mutex: ensures atomicity of indirect register accesses * @reg_atomic_mutex: ensures atomicity of indirect register accesses
* (accesses to RF and BBP). * (accesses to RF and BBP).
* @hw_atomic_mutex: ensures exclusive access to HW during critical * @hw_atomic_mutex: ensures exclusive access to HW during critical
...@@ -184,6 +184,8 @@ struct mt7601u_dev { ...@@ -184,6 +184,8 @@ struct mt7601u_dev {
struct mt7601u_eeprom_params *ee; struct mt7601u_eeprom_params *ee;
struct mutex vendor_req_mutex; struct mutex vendor_req_mutex;
void *vend_buf;
struct mutex reg_atomic_mutex; struct mutex reg_atomic_mutex;
struct mutex hw_atomic_mutex; struct mutex hw_atomic_mutex;
......
...@@ -92,8 +92,7 @@ void mt7601u_complete_urb(struct urb *urb) ...@@ -92,8 +92,7 @@ void mt7601u_complete_urb(struct urb *urb)
complete(cmpl); complete(cmpl);
} }
static int int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
__mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
const u8 direction, const u16 val, const u16 offset, const u8 direction, const u16 val, const u16 offset,
void *buf, const size_t buflen) void *buf, const size_t buflen)
{ {
...@@ -110,6 +109,8 @@ __mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req, ...@@ -110,6 +109,8 @@ __mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
trace_mt_vend_req(dev, pipe, req, req_type, val, offset, trace_mt_vend_req(dev, pipe, req, req_type, val, offset,
buf, buflen, ret); buf, buflen, ret);
if (ret == -ENODEV)
set_bit(MT7601U_STATE_REMOVED, &dev->state);
if (ret >= 0 || ret == -ENODEV) if (ret >= 0 || ret == -ENODEV)
return ret; return ret;
...@@ -122,25 +123,6 @@ __mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req, ...@@ -122,25 +123,6 @@ __mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
return ret; return ret;
} }
int
mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
const u8 direction, const u16 val, const u16 offset,
void *buf, const size_t buflen)
{
int ret;
mutex_lock(&dev->vendor_req_mutex);
ret = __mt7601u_vendor_request(dev, req, direction, val, offset,
buf, buflen);
if (ret == -ENODEV)
set_bit(MT7601U_STATE_REMOVED, &dev->state);
mutex_unlock(&dev->vendor_req_mutex);
return ret;
}
void mt7601u_vendor_reset(struct mt7601u_dev *dev) void mt7601u_vendor_reset(struct mt7601u_dev *dev)
{ {
mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT, mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
...@@ -150,19 +132,21 @@ void mt7601u_vendor_reset(struct mt7601u_dev *dev) ...@@ -150,19 +132,21 @@ void mt7601u_vendor_reset(struct mt7601u_dev *dev)
u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset) u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
{ {
int ret; int ret;
__le32 reg; u32 val = ~0;
u32 val;
WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset); WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
mutex_lock(&dev->vendor_req_mutex);
ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN, ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN,
0, offset, &reg, sizeof(reg)); 0, offset, dev->vend_buf, MT_VEND_BUF);
val = le32_to_cpu(reg); if (ret == MT_VEND_BUF)
if (ret > 0 && ret != sizeof(reg)) { val = get_unaligned_le32(dev->vend_buf);
else if (ret > 0)
dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n", dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
ret, offset); ret, offset);
val = ~0;
} mutex_unlock(&dev->vendor_req_mutex);
trace_reg_read(dev, offset, val); trace_reg_read(dev, offset, val);
return val; return val;
...@@ -173,12 +157,17 @@ int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req, ...@@ -173,12 +157,17 @@ int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
{ {
int ret; int ret;
mutex_lock(&dev->vendor_req_mutex);
ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT, ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
val & 0xffff, offset, NULL, 0); val & 0xffff, offset, NULL, 0);
if (ret) if (!ret)
return ret; ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
return mt7601u_vendor_request(dev, req, USB_DIR_OUT,
val >> 16, offset + 2, NULL, 0); val >> 16, offset + 2, NULL, 0);
mutex_unlock(&dev->vendor_req_mutex);
return ret;
} }
void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val) void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
...@@ -275,6 +264,12 @@ static int mt7601u_probe(struct usb_interface *usb_intf, ...@@ -275,6 +264,12 @@ static int mt7601u_probe(struct usb_interface *usb_intf,
usb_set_intfdata(usb_intf, dev); usb_set_intfdata(usb_intf, dev);
dev->vend_buf = devm_kmalloc(dev->dev, MT_VEND_BUF, GFP_KERNEL);
if (!dev->vend_buf) {
ret = -ENOMEM;
goto err;
}
ret = mt7601u_assign_pipes(usb_intf, dev); ret = mt7601u_assign_pipes(usb_intf, dev);
if (ret) if (ret)
goto err; goto err;
......
...@@ -23,6 +23,8 @@ ...@@ -23,6 +23,8 @@
#define MT_VEND_DEV_MODE_RESET 1 #define MT_VEND_DEV_MODE_RESET 1
#define MT_VEND_BUF sizeof(__le32)
enum mt_vendor_req { enum mt_vendor_req {
MT_VEND_DEV_MODE = 1, MT_VEND_DEV_MODE = 1,
MT_VEND_WRITE = 2, MT_VEND_WRITE = 2,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment