Commit e5a87625 authored by John W. Linville
parents 9b4e9e75 83f84d7b
@@ -295,7 +295,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
static int iwl_verify_sec_sparse(struct iwl_priv *priv,
const struct fw_desc *fw_desc)
{
__le32 *image = (__le32 *)fw_desc->v_addr;
__le32 *image = (__le32 *)fw_desc->data;
u32 len = fw_desc->len;
u32 val;
u32 i;
@@ -319,7 +319,7 @@ static int iwl_verify_sec_sparse(struct iwl_priv *priv,
static void iwl_print_mismatch_sec(struct iwl_priv *priv,
const struct fw_desc *fw_desc)
{
__le32 *image = (__le32 *)fw_desc->v_addr;
__le32 *image = (__le32 *)fw_desc->data;
u32 len = fw_desc->len;
u32 val;
u32 offs;
......
@@ -64,6 +64,7 @@
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
@@ -164,10 +165,8 @@ struct fw_sec {
static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
{
if (desc->v_addr)
dma_free_coherent(drv->trans->dev, desc->len,
desc->v_addr, desc->p_addr);
desc->v_addr = NULL;
vfree(desc->data);
desc->data = NULL;
desc->len = 0;
}
@@ -186,21 +185,24 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
}
static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
struct fw_sec *sec)
struct fw_sec *sec)
{
if (!sec || !sec->size) {
desc->v_addr = NULL;
void *data;
desc->data = NULL;
if (!sec || !sec->size)
return -EINVAL;
}
desc->v_addr = dma_alloc_coherent(drv->trans->dev, sec->size,
&desc->p_addr, GFP_KERNEL);
if (!desc->v_addr)
data = vmalloc(sec->size);
if (!data)
return -ENOMEM;
desc->len = sec->size;
desc->offset = sec->offset;
memcpy(desc->v_addr, sec->data, sec->size);
memcpy(data, sec->data, desc->len);
desc->data = data;
return 0;
}
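As an aside, a minimal sketch of the pattern this commit moves to: firmware sections are kept in vmalloc'ed memory (virtually, not physically, contiguous, so not directly DMA-able) and are streamed to the device through a single page-sized DMA-coherent bounce buffer at load time (see the iwl_load_section hunks further down). The demo_* names below are invented for the illustration and are not part of the driver.

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Sketch only: keep a copy of a firmware section in vmalloc'ed memory. */
static const void *demo_keep_section(const void *fw, size_t len)
{
	void *copy = vmalloc(len);

	if (!copy)
		return NULL;
	memcpy(copy, fw, len);
	return copy;
}

/* Sketch only: stream a vmalloc'ed section to the device one PAGE_SIZE
 * chunk at a time through a coherent bounce buffer; demo_write_chunk()
 * stands in for the DMA kick the driver does per chunk. */
static int demo_stream_section(struct device *dev, const void *sec, u32 len,
			       int (*demo_write_chunk)(dma_addr_t p, u32 n))
{
	dma_addr_t p_addr;
	void *bounce;
	u32 offset;
	int ret = 0;

	bounce = dma_alloc_coherent(dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	for (offset = 0; offset < len; offset += PAGE_SIZE) {
		u32 copy_size = min_t(u32, PAGE_SIZE, len - offset);

		memcpy(bounce, (const u8 *)sec + offset, copy_size);
		ret = demo_write_chunk(p_addr, copy_size);
		if (ret)
			break;
	}

	dma_free_coherent(dev, PAGE_SIZE, bounce, p_addr);
	return ret;
}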
......
@@ -124,8 +124,7 @@ struct iwl_ucode_capabilities {
/* one for each uCode image (inst/data, init/runtime/wowlan) */
struct fw_desc {
dma_addr_t p_addr; /* hardware address */
void *v_addr; /* software address */
const void *data; /* vmalloc'ed data */
u32 len; /* size in bytes */
u32 offset; /* offset in the device */
};
......
@@ -263,8 +263,6 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
#ifndef CONFIG_IWLWIFI_IDI
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
@@ -307,8 +305,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
#endif /* CONFIG_IWLWIFI_IDI */
#ifdef CONFIG_PM_SLEEP
static int iwl_pci_suspend(struct device *device)
@@ -353,15 +349,6 @@ static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
#endif
#ifdef CONFIG_IWLWIFI_IDI
/*
* Defined externally in iwl-idi.c
*/
int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
void __devexit iwl_pci_remove(struct pci_dev *pdev);
#endif /* CONFIG_IWLWIFI_IDI */
static struct pci_driver iwl_pci_driver = {
.name = DRV_NAME,
.id_table = iwl_hw_card_ids,
......
@@ -311,7 +311,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
******************************************************/
void iwl_bg_rx_replenish(struct work_struct *data);
void iwl_irq_tasklet(struct iwl_trans *trans);
void iwlagn_rx_replenish(struct iwl_trans *trans);
void iwl_rx_replenish(struct iwl_trans *trans);
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
struct iwl_rx_queue *q);
......
@@ -35,10 +35,6 @@
#include "internal.h"
#include "iwl-op-mode.h"
#ifdef CONFIG_IWLWIFI_IDI
#include "iwl-amfh.h"
#endif
/******************************************************************************
*
* RX path functions
@@ -181,15 +177,15 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
}
/**
* iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
* iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
*/
static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
return cpu_to_le32((u32)(dma_addr >> 8));
}
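A side note on the >> 8 above (an illustration, not part of the commit): the receive buffers handed to the device are page-backed and therefore at least 256-byte aligned, so the low 8 bits of the DMA address are always zero and bits [39:8] fit in the 32-bit RBD word. A hypothetical helper that checks the encoding is lossless:

#include <linux/types.h>

/* Hypothetical, for illustration only: the RBD encoding (address >> 8 kept
 * in 32 bits) round-trips exactly when the buffer is 256-byte aligned and
 * its address is below 1 << 40. */
static inline bool demo_rbd_ptr_roundtrips(dma_addr_t dma_addr)
{
	u32 rbd = (u32)(dma_addr >> 8);

	return ((dma_addr_t)rbd << 8) == dma_addr;
}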
/**
* iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
* iwl_rx_queue_restock - refill RX queue from pre-allocated pool
*
* If there are slots in the RX queue that need to be restocked,
* and we have free pre-allocated buffers, fill the ranks as much
@@ -199,7 +195,7 @@ static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
* also updates the memory address in the firmware to reference the new
* target buffer.
*/
static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
static void iwl_rx_queue_restock(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
@@ -207,6 +203,17 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
/*
* If the device isn't enabled - no need to try to add buffers...
* This can happen when we stop the device and still have an interrupt
* pending. We stop the APM before we sync the interrupts / tasklets
* because we have to (see comment there). On the other hand, since
* the APM is stopped, we cannot access the HW (in particular not prph).
* So don't try to restock if the APM has been already stopped.
*/
if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
return;
spin_lock_irqsave(&rxq->lock, flags);
while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
/* The overwritten rxb must be a used one */
@@ -219,7 +226,7 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
list_del(element);
/* Point to Rx buffer via next RBD in circular buffer */
rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma);
rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(rxb->page_dma);
rxq->queue[rxq->write] = rxb;
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
@@ -230,7 +237,6 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
if (rxq->free_count <= RX_LOW_WATERMARK)
schedule_work(&trans_pcie->rx_replenish);
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
@@ -241,15 +247,16 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
}
}
/**
* iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
*
* When moving to rx_free an SKB is allocated for the slot.
/*
* iwl_rx_allocate - allocate a page for each used RBD
*
* Also restock the Rx queue via iwl_rx_queue_restock.
* This is called as a scheduled work item (except for during initialization)
* A used RBD is an Rx buffer that has been given to the stack. To use it again
* a page must be allocated and the RBD must point to the page. This function
* doesn't change the HW pointer but handles the list of pages that is used by
* iwl_rx_queue_restock. The latter function will update the HW to use the newly
* allocated buffers.
*/
static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
@@ -328,23 +335,31 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
}
}
void iwlagn_rx_replenish(struct iwl_trans *trans)
/*
* iwl_rx_replenish - Move all used buffers from rx_used to rx_free
*
* When moving to rx_free a page is allocated for the slot.
*
* Also restock the Rx queue via iwl_rx_queue_restock.
* This is called as a scheduled work item (except for during initialization)
*/
void iwl_rx_replenish(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
iwlagn_rx_allocate(trans, GFP_KERNEL);
iwl_rx_allocate(trans, GFP_KERNEL);
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
iwlagn_rx_queue_restock(trans);
iwl_rx_queue_restock(trans);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
static void iwlagn_rx_replenish_now(struct iwl_trans *trans)
static void iwl_rx_replenish_now(struct iwl_trans *trans)
{
iwlagn_rx_allocate(trans, GFP_ATOMIC);
iwl_rx_allocate(trans, GFP_ATOMIC);
iwlagn_rx_queue_restock(trans);
iwl_rx_queue_restock(trans);
}
void iwl_bg_rx_replenish(struct work_struct *data)
@@ -352,7 +367,7 @@ void iwl_bg_rx_replenish(struct work_struct *data)
struct iwl_trans_pcie *trans_pcie =
container_of(data, struct iwl_trans_pcie, rx_replenish);
iwlagn_rx_replenish(trans_pcie->trans);
iwl_rx_replenish(trans_pcie->trans);
}
static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
@@ -530,7 +545,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
count++;
if (count >= 8) {
rxq->read = i;
iwlagn_rx_replenish_now(trans);
iwl_rx_replenish_now(trans);
count = 0;
}
}
@@ -539,9 +554,9 @@ static void iwl_rx_handle(struct iwl_trans *trans)
/* Backtrack one entry */
rxq->read = i;
if (fill_rx)
iwlagn_rx_replenish_now(trans);
iwl_rx_replenish_now(trans);
else
iwlagn_rx_queue_restock(trans);
iwl_rx_queue_restock(trans);
}
/**
@@ -723,11 +738,9 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
/* Disable periodic interrupt; we use it as just a one-shot. */
iwl_write8(trans, CSR_INT_PERIODIC_REG,
CSR_INT_PERIODIC_DIS);
#ifdef CONFIG_IWLWIFI_IDI
iwl_amfh_rx_handler();
#else
iwl_rx_handle(trans);
#endif
/*
* Enable periodic interrupt in 8 msec only if we received
* real RX interrupt (instead of just periodic int), to catch
......
@@ -216,7 +216,7 @@ static int iwl_rx_init(struct iwl_trans *trans)
rxq->free_count = 0;
spin_unlock_irqrestore(&rxq->lock, flags);
iwlagn_rx_replenish(trans);
iwl_rx_replenish(trans);
iwl_trans_rx_hw_init(trans, rxq);
@@ -855,10 +855,8 @@ static int iwl_nic_init(struct iwl_trans *trans)
iwl_op_mode_nic_config(trans->op_mode);
#ifndef CONFIG_IWLWIFI_IDI
/* Allocate the RX queue, or reset if it is already allocated */
iwl_rx_init(trans);
#endif
/* Allocate or reset and init all Tx and Command queues */
if (iwl_tx_init(trans))
@@ -925,13 +923,10 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
/*
* ucode
*/
static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
const struct fw_desc *section)
static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
dma_addr_t phy_addr, u32 byte_cnt)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
dma_addr_t phy_addr = section->p_addr;
u32 byte_cnt = section->len;
u32 dst_addr = section->offset;
int ret;
trans_pcie->ucode_write_complete = false;
@@ -945,8 +940,8 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
dst_addr);
iwl_write_direct32(trans,
FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
iwl_write_direct32(trans,
FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
@@ -965,33 +960,64 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
section_num);
ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
trans_pcie->ucode_write_complete, 5 * HZ);
if (!ret) {
IWL_ERR(trans, "Could not load the [%d] uCode section\n",
section_num);
IWL_ERR(trans, "Failed to load firmware chunk!\n");
return -ETIMEDOUT;
}
return 0;
}
static int iwl_load_given_ucode(struct iwl_trans *trans,
const struct fw_img *image)
static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
const struct fw_desc *section)
{
u8 *v_addr;
dma_addr_t p_addr;
u32 offset;
int ret = 0;
int i;
for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
if (!image->sec[i].p_addr)
break;
IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
section_num);
ret = iwl_load_section(trans, i, &image->sec[i]);
if (ret)
return ret;
v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
if (!v_addr)
return -ENOMEM;
for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
u32 copy_size;
copy_size = min_t(u32, PAGE_SIZE, section->len - offset);
memcpy(v_addr, (u8 *)section->data + offset, copy_size);
ret = iwl_load_firmware_chunk(trans, section->offset + offset,
p_addr, copy_size);
if (ret) {
IWL_ERR(trans,
"Could not load the [%d] uCode section\n",
section_num);
break;
}
}
dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
return ret;
}
static int iwl_load_given_ucode(struct iwl_trans *trans,
const struct fw_img *image)
{
int i, ret = 0;
for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
if (!image->sec[i].data)
break;
ret = iwl_load_section(trans, i, &image->sec[i]);
if (ret)
return ret;
}
/* Remove all resets to allow NIC to operate */
iwl_write32(trans, CSR_RESET, 0);
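To make the chunking in the new iwl_load_section() concrete, here is a small standalone illustration; the 137000-byte section length and 4 KiB PAGE_SIZE are assumptions for the example, not values from the driver. The loop sends 33 full 4096-byte chunks plus one trailing 1832-byte chunk, which is exactly what min_t(u32, PAGE_SIZE, section->len - offset) yields on the last iteration.

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096u		/* assumed PAGE_SIZE */

int main(void)
{
	uint32_t len = 137000;		/* hypothetical section length */
	uint32_t offset, chunks = 0;

	for (offset = 0; offset < len; offset += DEMO_PAGE_SIZE) {
		/* same arithmetic as min_t(u32, PAGE_SIZE, len - offset) */
		uint32_t copy_size = len - offset;

		if (copy_size > DEMO_PAGE_SIZE)
			copy_size = DEMO_PAGE_SIZE;
		printf("chunk %u: offset %u, %u bytes\n", chunks++, offset,
		       copy_size);
	}
	printf("%u chunks total\n", chunks);	/* prints 34 for this length */
	return 0;
}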
@@ -1184,9 +1210,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
*/
if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
iwl_trans_tx_stop(trans);
#ifndef CONFIG_IWLWIFI_IDI
iwl_trans_rx_stop(trans);
#endif
/* Power-down device's busmaster DMA clocks */
iwl_write_prph(trans, APMG_CLK_DIS_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
@@ -1456,14 +1481,16 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
bool hw_rfkill;
unsigned long flags;
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
iwl_disable_interrupts(trans);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
iwl_apm_stop(trans);
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
iwl_disable_interrupts(trans);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
if (!op_mode_leaving) {
/*
* Even if we stop the HW, we still want the RF kill
@@ -1551,9 +1578,8 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
iwl_trans_pcie_tx_free(trans);
#ifndef CONFIG_IWLWIFI_IDI
iwl_trans_pcie_rx_free(trans);
#endif
if (trans_pcie->irq_requested == true) {
free_irq(trans_pcie->irq, trans);
iwl_free_isr_ict(trans);
......