Commit 87687ac9 authored by Roland Vossen, committed by Greg Kroah-Hartman

staging: brcm80211: removed unused code in dma.c

Code cleanup. The preprocessor symbol BCMDMASGLISTOSL was never defined, so all code guarded by DMASGLIST_ENAB was unreachable and can be removed.
Reviewed-by: Arend van Spriel <arend@broadcom.com>
Reviewed-by: Pieter-Paul Giesberts <pieterpg@broadcom.com>
Signed-off-by: Roland Vossen <rvossen@broadcom.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 0310b1bc
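For context, the pattern removed below is the classic always-false compile-time guard: because BCMDMASGLISTOSL is never defined, DMASGLIST_ENAB always expands to false and every branch it guards can never execute. The following standalone sketch (not kernel code; only the BCMDMASGLISTOSL and DMASGLIST_ENAB names come from dma.c, the rest is illustrative) shows the idea:

    #include <stdbool.h>
    #include <stdio.h>

    /* Same pattern as in dma.c: BCMDMASGLISTOSL is never defined,
     * so DMASGLIST_ENAB always expands to false. */
    #ifdef BCMDMASGLISTOSL
    #define DMASGLIST_ENAB true
    #else
    #define DMASGLIST_ENAB false
    #endif

    int main(void)
    {
            if (DMASGLIST_ENAB)
                    /* unreachable: the compiler can discard this branch */
                    printf("scatter-gather path\n");
            else
                    printf("single-buffer path (always taken)\n");
            return 0;
    }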
@@ -207,18 +207,6 @@ static uint dma_msg_level;

 #define DI_INFO(dmah)  ((dma_info_t *)dmah)

-/* One physical DMA segment */
-struct dma_seg {
-        unsigned long addr;
-        u32 length;
-};
-
-struct dma_seg_map {
-        uint origsize;          /* Size of the virtual packet */
-        uint nsegs;
-        struct dma_seg segs[MAX_DMA_SEGS];
-};
-
 /*
  * DMA Descriptor
  * Descriptors are only read by the hardware, never written back.
@@ -261,7 +249,6 @@ struct dma_info {
         u16 txout;              /* index of next descriptor to post */
         /* pointer to parallel array of pointers to packets */
         struct sk_buff **txp;
-        struct dma_seg_map *txp_dmah;   /* DMA MAP meta-data handle */
         /* Aligned physical address of descriptor ring */
         unsigned long txdpa;
         /* Original physical address of descriptor ring */
@@ -278,7 +265,6 @@ struct dma_info {
         u16 rxout;              /* index of next descriptor to post */
         /* pointer to parallel array of pointers to packets */
         struct sk_buff **rxp;
-        struct dma_seg_map *rxp_dmah;   /* DMA MAP meta-data handle */
         /* Aligned physical address of descriptor ring */
         unsigned long rxdpa;
         /* Original physical address of descriptor ring */
@@ -312,16 +298,6 @@ struct dma_info {
         bool aligndesc_4k;
 };

-/*
- * DMA Scatter-gather list is supported. Note this is limited to TX
- * direction only
- */
-#ifdef BCMDMASGLISTOSL
-#define DMASGLIST_ENAB true
-#else
-#define DMASGLIST_ENAB false
-#endif                          /* BCMDMASGLISTOSL */
-
 /* descriptor bumping macros */
 /* faster than %, but n must be power of 2 */
 #define XXD(x, n)      ((x) & ((n) - 1))
@@ -524,23 +500,6 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
                   di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
                   di->addrext));

-        /* allocate DMA mapping vectors */
-        if (DMASGLIST_ENAB) {
-                if (ntxd) {
-                        size = ntxd * sizeof(struct dma_seg_map);
-                        di->txp_dmah = kzalloc(size, GFP_ATOMIC);
-                        if (di->txp_dmah == NULL)
-                                goto fail;
-                }
-
-                if (nrxd) {
-                        size = nrxd * sizeof(struct dma_seg_map);
-                        di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
-                        if (di->rxp_dmah == NULL)
-                                goto fail;
-                }
-        }
-
         return (struct dma_pub *) di;

 fail:
@@ -635,12 +594,6 @@ void dma_detach(struct dma_pub *pub)
         kfree(di->txp);
         kfree(di->rxp);

-        /* free tx packet DMA handles */
-        kfree(di->txp_dmah);
-
-        /* free rx packet DMA handles */
-        kfree(di->rxp_dmah);
-
         /* free our private info structure */
         kfree(di);

@@ -910,10 +863,6 @@ bool dma_rxfill(struct dma_pub *pub)
                  */
                 *(u32 *) (p->data) = 0;

-                if (DMASGLIST_ENAB)
-                        memset(&di->rxp_dmah[rxout], 0,
-                               sizeof(struct dma_seg_map));
-
                 pa = pci_map_single(di->pbus, p->data,
                                     di->rxbufsize, PCI_DMA_FROMDEVICE);

@@ -1265,9 +1214,6 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
          * allocating and initializing transmit descriptor entries.
          */
         for (p = p0; p; p = next) {
-                uint nsegs, j;
-                struct dma_seg_map *map;
-
                 data = p->data;
                 len = p->len;
                 next = p->next;
@@ -1280,53 +1226,25 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
                         continue;

                 /* get physical address of buffer start */
-                if (DMASGLIST_ENAB)
-                        memset(&di->txp_dmah[txout], 0,
-                               sizeof(struct dma_seg_map));
-
                 pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);

-                if (DMASGLIST_ENAB) {
-                        map = &di->txp_dmah[txout];
-
-                        /* See if all the segments can be accounted for */
-                        if (map->nsegs >
-                            (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
-                                    1))
-                                goto outoftxd;
-
-                        nsegs = map->nsegs;
-                } else
-                        nsegs = 1;
-
-                for (j = 1; j <= nsegs; j++) {
-                        flags = 0;
-                        if (p == p0 && j == 1)
-                                flags |= D64_CTRL1_SOF;
-
-                        /* With a DMA segment list, Descriptor table is filled
-                         * using the segment list instead of looping over
-                         * buffers in multi-chain DMA. Therefore, EOF for SGLIST
-                         * is when end of segment list is reached.
-                         */
-                        if ((!DMASGLIST_ENAB && next == NULL) ||
-                            (DMASGLIST_ENAB && j == nsegs))
-                                flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
-                        if (txout == (di->ntxd - 1))
-                                flags |= D64_CTRL1_EOT;
-
-                        if (DMASGLIST_ENAB) {
-                                len = map->segs[j - 1].length;
-                                pa = map->segs[j - 1].addr;
-                        }
-
-                        dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
-
-                        txout = NEXTTXD(txout);
-                }
-
-                /* See above. No need to loop over individual buffers */
-                if (DMASGLIST_ENAB)
-                        break;
+                flags = 0;
+                if (p == p0)
+                        flags |= D64_CTRL1_SOF;
+
+                /* With a DMA segment list, Descriptor table is filled
+                 * using the segment list instead of looping over
+                 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
+                 * is when end of segment list is reached.
+                 */
+                if (next == NULL)
+                        flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
+                if (txout == (di->ntxd - 1))
+                        flags |= D64_CTRL1_EOT;
+
+                dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
+
+                txout = NEXTTXD(txout);
         }

         /* if last txd eof not set, fix it */
@@ -1414,31 +1332,19 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
         for (i = start; i != end && !txp; i = NEXTTXD(i)) {
                 unsigned long pa;
-                struct dma_seg_map *map = NULL;
-                uint size, j, nsegs;
+                uint size;

                 pa = cpu_to_le32(di->txd64[i].addrlow) - di->dataoffsetlow;

-                if (DMASGLIST_ENAB) {
-                        map = &di->txp_dmah[i];
-                        size = map->origsize;
-                        nsegs = map->nsegs;
-                } else {
-                        size =
-                            (cpu_to_le32(di->txd64[i].ctrl2) &
-                             D64_CTRL2_BC_MASK);
-                        nsegs = 1;
-                }
-
-                for (j = nsegs; j > 0; j--) {
-                        di->txd64[i].addrlow = 0xdeadbeef;
-                        di->txd64[i].addrhigh = 0xdeadbeef;
+                size =
+                    (cpu_to_le32(di->txd64[i].ctrl2) &
+                     D64_CTRL2_BC_MASK);

-                        txp = di->txp[i];
-                        di->txp[i] = NULL;
-                        if (j > 1)
-                                i = NEXTTXD(i);
-                }
+                di->txd64[i].addrlow = 0xdeadbeef;
+                di->txd64[i].addrhigh = 0xdeadbeef;
+
+                txp = di->txp[i];
+                di->txp[i] = NULL;

                 pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
         }