Commit 06d3fdfc authored by Sai Teja Aluvala, committed by Luiz Augusto von Dentz

Bluetooth: hci_qca: Add qcom devcoredump support

Intercept debug exception events from the QCA controller and put them into
a devcoredump using the hci devcoredump APIs of hci_core.
Signed-off-by: Sai Teja Aluvala <quic_saluvala@quicinc.com>

V2 -> V3:
---------
Changed the hci_coredump_qca function.

V1 -> V2:
---------
Updated to work with the updated HCI devcoredump API.
Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
parent 6ce95a30
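For readers unfamiliar with the hci_core devcoredump API this patch adopts, the sketch below illustrates the overall flow: register the crash-trigger and dump-header callbacks once at setup, open a dump with hci_devcd_init() when the first crash event arrives, append fragments as they come in (a missed fragment can be padded with hci_devcd_append_pattern()), and finish with hci_devcd_complete() or hci_devcd_abort(). This is a minimal illustration only; the my_* function names, the 64 KiB size, and the first/last flags are made up for the example, while the real driver code follows in the diff.

/* Minimal sketch of the hci_core devcoredump flow (illustration only;
 * my_coredump(), my_dmp_hdr(), my_setup() and my_rx_crash_event() are
 * hypothetical -- the real QCA implementation is in the diff below).
 */
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void my_coredump(struct hci_dev *hdev)
{
	/* Send the vendor command that makes the controller emit a dump. */
}

static void my_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* skb_put_data() human-readable header lines for the dump file. */
}

static int my_setup(struct hci_dev *hdev)
{
	/* Register once: crash-trigger callback, dump-header callback,
	 * optional state-change callback (NULL here).
	 */
	hci_devcd_register(hdev, my_coredump, my_dmp_hdr, NULL);
	return 0;
}

static void my_rx_crash_event(struct hci_dev *hdev, struct sk_buff *skb,
			      bool first, bool last)
{
	if (first) {
		/* The first crash event carries the total dump size. */
		if (hci_devcd_init(hdev, 64 * 1024) < 0) {
			kfree_skb(skb);
			return;
		}
	}

	/* Fragments are appended in order; hci_devcd_append() consumes the
	 * skb. A missed fragment could be padded with
	 * hci_devcd_append_pattern(hdev, 0x00, fragment_size).
	 */
	hci_devcd_append(hdev, skb);

	if (last)
		hci_devcd_complete(hdev);	/* on timeout: hci_devcd_abort(hdev) */
}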
@@ -117,9 +117,7 @@ enum qca_memdump_states {
 	QCA_MEMDUMP_TIMEOUT,
 };
 
-struct qca_memdump_data {
-	char *memdump_buf_head;
-	char *memdump_buf_tail;
+struct qca_memdump_info {
 	u32 current_seq_no;
 	u32 received_dump;
 	u32 ram_dump_size;
@@ -160,13 +158,15 @@ struct qca_data {
 	struct work_struct ws_tx_vote_off;
 	struct work_struct ctrl_memdump_evt;
 	struct delayed_work ctrl_memdump_timeout;
-	struct qca_memdump_data *qca_memdump;
+	struct qca_memdump_info *qca_memdump;
 	unsigned long flags;
 	struct completion drop_ev_comp;
 	wait_queue_head_t suspend_wait_q;
 	enum qca_memdump_states memdump_state;
 	struct mutex hci_memdump_lock;
 
+	u16 fw_version;
+	u16 controller_id;
 	/* For debugging purpose */
 	u64 ibs_sent_wacks;
 	u64 ibs_sent_slps;
@@ -233,6 +233,7 @@ static void qca_regulator_disable(struct qca_serdev *qcadev);
 static void qca_power_shutdown(struct hci_uart *hu);
 static int qca_power_off(struct hci_dev *hdev);
 static void qca_controller_memdump(struct work_struct *work);
+static void qca_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb);
 
 static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu)
 {
@@ -980,6 +981,28 @@ static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
 	return hci_recv_frame(hdev, skb);
 }
 
+static void qca_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+	struct qca_data *qca = hu->priv;
+	char buf[80];
+
+	snprintf(buf, sizeof(buf), "Controller Name: 0x%x\n",
+		 qca->controller_id);
+	skb_put_data(skb, buf, strlen(buf));
+
+	snprintf(buf, sizeof(buf), "Firmware Version: 0x%x\n",
+		 qca->fw_version);
+	skb_put_data(skb, buf, strlen(buf));
+
+	snprintf(buf, sizeof(buf), "Vendor:Qualcomm\n");
+	skb_put_data(skb, buf, strlen(buf));
+
+	snprintf(buf, sizeof(buf), "Driver: %s\n",
+		 hu->serdev->dev.driver->name);
+	skb_put_data(skb, buf, strlen(buf));
+}
+
 static void qca_controller_memdump(struct work_struct *work)
 {
 	struct qca_data *qca = container_of(work, struct qca_data,
@@ -987,13 +1010,11 @@ static void qca_controller_memdump(struct work_struct *work)
 	struct hci_uart *hu = qca->hu;
 	struct sk_buff *skb;
 	struct qca_memdump_event_hdr *cmd_hdr;
-	struct qca_memdump_data *qca_memdump = qca->qca_memdump;
+	struct qca_memdump_info *qca_memdump = qca->qca_memdump;
 	struct qca_dump_size *dump;
-	char *memdump_buf;
-	char nullBuff[QCA_DUMP_PACKET_SIZE] = { 0 };
 	u16 seq_no;
-	u32 dump_size;
 	u32 rx_size;
+	int ret = 0;
 	enum qca_btsoc_type soc_type = qca_soc_type(hu);
 
 	while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
@@ -1009,7 +1030,7 @@ static void qca_controller_memdump(struct work_struct *work)
 		}
 
 		if (!qca_memdump) {
-			qca_memdump = kzalloc(sizeof(struct qca_memdump_data),
+			qca_memdump = kzalloc(sizeof(struct qca_memdump_info),
 					      GFP_ATOMIC);
 			if (!qca_memdump) {
 				mutex_unlock(&qca->hci_memdump_lock);
@@ -1035,44 +1056,49 @@ static void qca_controller_memdump(struct work_struct *work)
 			set_bit(QCA_IBS_DISABLED, &qca->flags);
 			set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
 			dump = (void *) skb->data;
-			dump_size = __le32_to_cpu(dump->dump_size);
-			if (!(dump_size)) {
+			qca_memdump->ram_dump_size = __le32_to_cpu(dump->dump_size);
+			if (!(qca_memdump->ram_dump_size)) {
 				bt_dev_err(hu->hdev, "Rx invalid memdump size");
 				kfree(qca_memdump);
 				kfree_skb(skb);
+				qca->qca_memdump = NULL;
 				mutex_unlock(&qca->hci_memdump_lock);
 				return;
 			}
 
-			bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
-				    dump_size);
 			queue_delayed_work(qca->workqueue,
 					   &qca->ctrl_memdump_timeout,
-					   msecs_to_jiffies(MEMDUMP_TIMEOUT_MS)
-					  );
-
-			skb_pull(skb, sizeof(dump_size));
-			memdump_buf = vmalloc(dump_size);
-			qca_memdump->ram_dump_size = dump_size;
-			qca_memdump->memdump_buf_head = memdump_buf;
-			qca_memdump->memdump_buf_tail = memdump_buf;
-		}
+					   msecs_to_jiffies(MEMDUMP_TIMEOUT_MS));
+			skb_pull(skb, sizeof(qca_memdump->ram_dump_size));
+			qca_memdump->current_seq_no = 0;
+			qca_memdump->received_dump = 0;
+			ret = hci_devcd_init(hu->hdev, qca_memdump->ram_dump_size);
+			bt_dev_info(hu->hdev, "hci_devcd_init Return:%d",
+				    ret);
+			if (ret < 0) {
+				kfree(qca->qca_memdump);
+				qca->qca_memdump = NULL;
+				qca->memdump_state = QCA_MEMDUMP_COLLECTED;
+				cancel_delayed_work(&qca->ctrl_memdump_timeout);
+				clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
+				mutex_unlock(&qca->hci_memdump_lock);
+				return;
+			}
 
-		memdump_buf = qca_memdump->memdump_buf_tail;
+			bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
+				    qca_memdump->ram_dump_size);
+		}
 
 		/* If sequence no 0 is missed then there is no point in
 		 * accepting the other sequences.
 		 */
-		if (!memdump_buf) {
+		if (!test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
 			bt_dev_err(hu->hdev, "QCA: Discarding other packets");
 			kfree(qca_memdump);
 			kfree_skb(skb);
+			qca->qca_memdump = NULL;
			mutex_unlock(&qca->hci_memdump_lock);
			return;
		}
 
 		/* There could be chance of missing some packets from
 		 * the controller. In such cases let us store the dummy
 		 * packets in the buffer.
@@ -1094,8 +1120,8 @@ static void qca_controller_memdump(struct work_struct *work)
 				       qca_memdump->received_dump);
 				break;
 			}
-			memcpy(memdump_buf, nullBuff, QCA_DUMP_PACKET_SIZE);
-			memdump_buf = memdump_buf + QCA_DUMP_PACKET_SIZE;
+			hci_devcd_append_pattern(hu->hdev, 0x00,
+						 QCA_DUMP_PACKET_SIZE);
 			qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE;
 			qca_memdump->current_seq_no++;
 		}
@@ -1103,34 +1129,29 @@ static void qca_controller_memdump(struct work_struct *work)
 		rx_size = qca_memdump->received_dump + skb->len;
 		if (rx_size <= qca_memdump->ram_dump_size) {
 			if ((seq_no != QCA_LAST_SEQUENCE_NUM) &&
-			    (seq_no != qca_memdump->current_seq_no))
+			    (seq_no != qca_memdump->current_seq_no)) {
 				bt_dev_err(hu->hdev,
 					   "QCA memdump unexpected packet %d",
 					   seq_no);
+			}
 			bt_dev_dbg(hu->hdev,
 				   "QCA memdump packet %d with length %d",
 				   seq_no, skb->len);
-			memcpy(memdump_buf, (unsigned char *)skb->data,
-			       skb->len);
-			memdump_buf = memdump_buf + skb->len;
-			qca_memdump->memdump_buf_tail = memdump_buf;
-			qca_memdump->current_seq_no = seq_no + 1;
-			qca_memdump->received_dump += skb->len;
+			hci_devcd_append(hu->hdev, skb);
+			qca_memdump->current_seq_no += 1;
+			qca_memdump->received_dump = rx_size;
 		} else {
 			bt_dev_err(hu->hdev,
-				   "QCA memdump received %d, no space for packet %d",
-				   qca_memdump->received_dump, seq_no);
+				   "QCA memdump received no space for packet %d",
+				   qca_memdump->current_seq_no);
 		}
-		qca->qca_memdump = qca_memdump;
-		kfree_skb(skb);
+
 		if (seq_no == QCA_LAST_SEQUENCE_NUM) {
 			bt_dev_info(hu->hdev,
 				    "QCA memdump Done, received %d, total %d",
 				    qca_memdump->received_dump,
 				    qca_memdump->ram_dump_size);
-			memdump_buf = qca_memdump->memdump_buf_head;
-			dev_coredumpv(&hu->serdev->dev, memdump_buf,
-				      qca_memdump->received_dump, GFP_KERNEL);
+			hci_devcd_complete(hu->hdev);
 			cancel_delayed_work(&qca->ctrl_memdump_timeout);
 			kfree(qca->qca_memdump);
 			qca->qca_memdump = NULL;
@@ -1541,8 +1562,8 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
 	mutex_lock(&qca->hci_memdump_lock);
 	if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
 		bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
+		hci_devcd_abort(hu->hdev);
 		if (qca->qca_memdump) {
-			vfree(qca->qca_memdump->memdump_buf_head);
 			kfree(qca->qca_memdump);
 			qca->qca_memdump = NULL;
 		}
@@ -1706,6 +1727,17 @@ static int qca_power_on(struct hci_dev *hdev)
 	return ret;
 }
 
+static void hci_coredump_qca(struct hci_dev *hdev)
+{
+	static const u8 param[] = { 0x26 };
+	struct sk_buff *skb;
+
+	skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
+	if (IS_ERR(skb))
+		bt_dev_err(hdev, "%s: trigger crash failed (%ld)", __func__, PTR_ERR(skb));
+	kfree_skb(skb);
+}
+
 static int qca_setup(struct hci_uart *hu)
 {
 	struct hci_dev *hdev = hu->hdev;
@@ -1820,6 +1852,9 @@ static int qca_setup(struct hci_uart *hu)
 		hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
 	else
 		hu->hdev->set_bdaddr = qca_set_bdaddr;
+	qca->fw_version = le16_to_cpu(ver.patch_ver);
+	qca->controller_id = le16_to_cpu(ver.rom_ver);
+	hci_devcd_register(hdev, hci_coredump_qca, qca_dmp_hdr, NULL);
 
 	return ret;
 }