Commit 7c8ae65a authored by Alexander Duyck, committed by Jeff Kirsher

ixgbe: Make FCoE allocation and configuration closer to how rings work

This patch changes the behavior of the FCoE configuration so that it is
much closer to how the main body of the ixgbe driver works for ring
allocation.

The first piece is the ixgbe_fcoe_ddp_enable/disable calls.  These allocate
the percpu values and, if successful, set the fcoe_ddp_xid value to indicate
that we can support DDP.

The next piece is the ixgbe_setup/free_fcoe_ddp_resources calls.  These are
called on open/close and allocate and free the DMA pools.

Finally, ixgbe_configure_fcoe is now just register configuration.  It goes
through and enables the registers for the FCoE redirection offload and FIP
configuration without any interference from the DDP pool allocation.

The net result of all this is twofold.  First, it adds a certain amount of
exception handling.  For example, if ixgbe_setup_fcoe_ddp_resources fails we
will actually generate an error in open and refuse to bring up the
interface.

Second, it provides a much more graceful failure path than the previous
model, which on a failure to allocate DDP resources would skip setting up
the FCoE registers entirely, leaving no FCoE Rx functionality enabled
instead of just disabling DDP.
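
For clarity, the resulting lifecycle can be sketched as follows.  This is
condensed pseudocode of the call ordering implied by the diff below, not
literal driver source; error handling and locking are trimmed:

	/* ndo_fcoe_enable: allocate percpu DDP state and advertise DDP */
	ixgbe_fcoe_ddp_enable(adapter);		/* alloc_percpu(), sets fcoe_ddp_xid */

	/* ndo_open -> ixgbe_setup_all_rx_resources: allocate the DMA pools
	 * and the shared extra DDP buffer; a failure here now fails open */
	err = ixgbe_setup_fcoe_ddp_resources(adapter);
	if (err)
		goto err_setup_rx;		/* interface refuses to come up */

	/* bring-up -> ixgbe_configure: pure register writes, independent
	 * of whether the DDP pools were actually allocated */
	ixgbe_configure_fcoe(adapter);		/* ETQF filters, FCRETA, FCRXCTRL */

	/* ndo_stop -> ixgbe_free_all_rx_resources: free the DMA pools */
	ixgbe_free_fcoe_ddp_resources(adapter);

	/* ndo_fcoe_disable: drop percpu state and clear fcoe_ddp_xid */
	ixgbe_fcoe_ddp_disable(adapter);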

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 5a1ee270
@@ -691,7 +691,6 @@ extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
		     struct ixgbe_tx_buffer *first,
		     u8 *hdr_len);
-extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
			  union ixgbe_adv_rx_desc *rx_desc,
			  struct sk_buff *skb);
@@ -700,6 +699,8 @@ extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
				 struct scatterlist *sgl, unsigned int sgc);
 extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_enable(struct net_device *netdev);
 extern int ixgbe_fcoe_disable(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
...
@@ -578,17 +578,6 @@ static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
 	ddp_pool->pool = NULL;
 }

-static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
-{
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu)
-		ixgbe_fcoe_dma_pool_free(fcoe, cpu);
-
-	free_percpu(fcoe->ddp_pool);
-	fcoe->ddp_pool = NULL;
-}
-
 static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
				     struct device *dev,
				     unsigned int cpu)
@@ -612,21 +601,6 @@ static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
 	return 0;
 }

-static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
-{
-	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-	struct device *dev = &adapter->pdev->dev;
-	unsigned int cpu;
-
-	fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
-	if (!fcoe->ddp_pool)
-		return;
-
-	/* allocate pci pool for each cpu */
-	for_each_possible_cpu(cpu)
-		ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
-}
-
 /**
  * ixgbe_configure_fcoe - configures registers for fcoe at start
  * @adapter: ptr to ixgbe adapter
@@ -637,39 +611,14 @@ static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
  */
 void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 {
-	int i, fcoe_q, fcoe_i;
+	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+	int i, fcoe_q, fcoe_i;
 	u32 etqf;

-	if (!fcoe->ddp_pool) {
-		spin_lock_init(&fcoe->lock);
-
-		ixgbe_fcoe_ddp_pools_alloc(adapter);
-		if (!fcoe->ddp_pool) {
-			e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
-			return;
-		}
-
-		/* Extra buffer to be shared by all DDPs for HW work around */
-		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
-		if (fcoe->extra_ddp_buffer == NULL) {
-			e_err(drv, "failed to allocated extra DDP buffer\n");
-			goto out_ddp_pools;
-		}
-
-		fcoe->extra_ddp_buffer_dma =
-			dma_map_single(&adapter->pdev->dev,
-				       fcoe->extra_ddp_buffer,
-				       IXGBE_FCBUFF_MIN,
-				       DMA_FROM_DEVICE);
-		if (dma_mapping_error(&adapter->pdev->dev,
-				      fcoe->extra_ddp_buffer_dma)) {
-			e_err(drv, "failed to map extra DDP buffer\n");
-			goto out_extra_ddp_buffer;
-		}
-	}
+	/* leave registers unconfigured if FCoE is disabled */
+	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+		return;
@@ -682,7 +631,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)

 	/* Use one or more Rx queues for FCoE by redirection table */
 	for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
-		fcoe_i = f->offset + (i % f->indices);
+		fcoe_i = fcoe->offset + (i % fcoe->indices);
 		fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
 		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
 		IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
@@ -698,7 +647,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);

 	/* Send FIP frames to the first FCoE queue */
-	fcoe_q = adapter->rx_ring[f->offset]->reg_idx;
+	fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
 	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
 			IXGBE_ETQS_QUEUE_EN |
 			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
@@ -707,40 +656,122 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
 			IXGBE_FCRXCTRL_FCCRCBO |
 			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
-
-	return;
-
-out_extra_ddp_buffer:
-	kfree(fcoe->extra_ddp_buffer);
-out_ddp_pools:
-	ixgbe_fcoe_ddp_pools_free(fcoe);
 }

 /**
- * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
+ * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
  * @adapter : ixgbe adapter
  *
  * Cleans up outstanding ddp context resources
  *
  * Returns : none
  */
-void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
 {
-	int i;
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+	int cpu, i;

+	/* do nothing if no DDP pools were allocated */
 	if (!fcoe->ddp_pool)
 		return;

 	for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
 		ixgbe_fcoe_ddp_put(adapter->netdev, i);

+	for_each_possible_cpu(cpu)
+		ixgbe_fcoe_dma_pool_free(fcoe, cpu);
+
 	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
 	kfree(fcoe->extra_ddp_buffer);
-	ixgbe_fcoe_ddp_pools_free(fcoe);
+	fcoe->extra_ddp_buffer = NULL;
+	fcoe->extra_ddp_buffer_dma = 0;
+}
+
+/**
+ * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
+ * @adapter: ixgbe adapter
+ *
+ * Sets up ddp context resources
+ *
+ * Returns : 0 indicates success or -ENOMEM on failure
+ */
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+	struct device *dev = &adapter->pdev->dev;
+	void *buffer;
+	dma_addr_t dma;
+	unsigned int cpu;
+
+	/* do nothing if no DDP pools were allocated */
+	if (!fcoe->ddp_pool)
+		return 0;
+
+	/* Extra buffer to be shared by all DDPs for HW work around */
+	buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+	if (!buffer) {
+		e_err(drv, "failed to allocate extra DDP buffer\n");
+		return -ENOMEM;
+	}
+
+	dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, dma)) {
+		e_err(drv, "failed to map extra DDP buffer\n");
+		kfree(buffer);
+		return -ENOMEM;
+	}
+
+	fcoe->extra_ddp_buffer = buffer;
+	fcoe->extra_ddp_buffer_dma = dma;
+
+	/* allocate pci pool for each cpu */
+	for_each_possible_cpu(cpu) {
+		int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
+		if (!err)
+			continue;
+
+		e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
+		ixgbe_free_fcoe_ddp_resources(adapter);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
+		return -EINVAL;
+
+	fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
+	if (!fcoe->ddp_pool) {
+		e_err(drv, "failed to allocate percpu DDP resources\n");
+		return -ENOMEM;
+	}
+
+	adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+
+	return 0;
+}
+
+static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+	adapter->netdev->fcoe_ddp_xid = 0;
+
+	if (!fcoe->ddp_pool)
+		return;
+
+	free_percpu(fcoe->ddp_pool);
+	fcoe->ddp_pool = NULL;
 }

 /**
@@ -753,40 +784,37 @@ void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
  */
 int ixgbe_fcoe_enable(struct net_device *netdev)
 {
-	int rc = -EINVAL;
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

-	atomic_inc(&fcoe->refcnt);
-
 	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
-		goto out_enable;
+		return -EINVAL;
+
+	atomic_inc(&fcoe->refcnt);

 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-		goto out_enable;
+		return -EINVAL;

 	e_info(drv, "Enabling FCoE offload features.\n");
 	if (netif_running(netdev))
 		netdev->netdev_ops->ndo_stop(netdev);

-	ixgbe_clear_interrupt_scheme(adapter);
+	/* Allocate per CPU memory to track DDP pools */
+	ixgbe_fcoe_ddp_enable(adapter);

+	/* enable FCoE and notify stack */
 	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
-	adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
-	netdev->features |= NETIF_F_FCOE_CRC;
-	netdev->features |= NETIF_F_FSO;
-	netdev->features |= NETIF_F_FCOE_MTU;
-	netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+	netdev->features |= NETIF_F_FSO | NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU;
+	netdev_features_change(netdev);

+	/* release existing queues and reallocate them */
+	ixgbe_clear_interrupt_scheme(adapter);
 	ixgbe_init_interrupt_scheme(adapter);
-	netdev_features_change(netdev);

 	if (netif_running(netdev))
 		netdev->netdev_ops->ndo_open(netdev);
-	rc = 0;

-out_enable:
-	return rc;
+	return 0;
 }

 /**
@@ -799,41 +827,37 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
  */
 int ixgbe_fcoe_disable(struct net_device *netdev)
 {
-	int rc = -EINVAL;
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

-	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
-		goto out_disable;
+	if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
+		return -EINVAL;

 	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
-		goto out_disable;
-
-	if (!atomic_dec_and_test(&fcoe->refcnt))
-		goto out_disable;
+		return -EINVAL;

 	e_info(drv, "Disabling FCoE offload features.\n");
-	netdev->features &= ~NETIF_F_FCOE_CRC;
-	netdev->features &= ~NETIF_F_FSO;
-	netdev->features &= ~NETIF_F_FCOE_MTU;
-	netdev->fcoe_ddp_xid = 0;
-	netdev_features_change(netdev);
-
 	if (netif_running(netdev))
 		netdev->netdev_ops->ndo_stop(netdev);

-	ixgbe_clear_interrupt_scheme(adapter);
+	/* Free per CPU memory to track DDP pools */
+	ixgbe_fcoe_ddp_disable(adapter);

+	/* disable FCoE and notify stack */
 	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
-	adapter->ring_feature[RING_F_FCOE].indices = 0;
-	ixgbe_cleanup_fcoe(adapter);
+
+	netdev->features &= ~(NETIF_F_FCOE_CRC |
+			      NETIF_F_FSO |
+			      NETIF_F_FCOE_MTU);
+	netdev_features_change(netdev);

+	/* release existing queues and reallocate them */
+	ixgbe_clear_interrupt_scheme(adapter);
 	ixgbe_init_interrupt_scheme(adapter);

 	if (netif_running(netdev))
 		netdev->netdev_ops->ndo_open(netdev);
-	rc = 0;

-out_disable:
-	return rc;
+	return 0;
 }

 /**
...
@@ -77,7 +77,7 @@ struct ixgbe_fcoe {
 	atomic_t refcnt;
 	spinlock_t lock;
 	struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
-	unsigned char *extra_ddp_buffer;
+	void *extra_ddp_buffer;
 	dma_addr_t extra_ddp_buffer_dma;
 	unsigned long mode;
 #ifdef CONFIG_IXGBE_DCB
...
@@ -3807,12 +3807,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 	ixgbe_set_rx_mode(adapter->netdev);
 	ixgbe_restore_vlan(adapter);

-#ifdef IXGBE_FCOE
-	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-		ixgbe_configure_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
 	switch (hw->mac.type) {
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
@@ -3842,6 +3836,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)

 	ixgbe_configure_virtualization(adapter);

+#ifdef IXGBE_FCOE
+	/* configure FCoE L2 filters, redirection table, and Rx control */
+	ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
 	ixgbe_configure_tx(adapter);
 	ixgbe_configure_rx(adapter);
 }
@@ -4434,6 +4433,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 		break;
 	}

+#ifdef IXGBE_FCOE
+	/* FCoE support exists, always init the FCoE lock */
+	spin_lock_init(&adapter->fcoe.lock);
+
+#endif
 	/* n-tuple support exists, always init our spinlock */
 	spin_lock_init(&adapter->fdir_perfect_lock);
@@ -4662,7 +4666,11 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 			goto err_setup_rx;
 	}

-	return 0;
+#ifdef IXGBE_FCOE
+	err = ixgbe_setup_fcoe_ddp_resources(adapter);
+	if (!err)
+#endif
+		return 0;
 err_setup_rx:
 	/* rewind the index freeing the rings as we go */
 	while (i--)
@@ -4741,6 +4749,10 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 {
 	int i;

+#ifdef IXGBE_FCOE
+	ixgbe_free_fcoe_ddp_resources(adapter);
+
+#endif
 	for (i = 0; i < adapter->num_rx_queues; i++)
 		if (adapter->rx_ring[i]->desc)
 			ixgbe_free_rx_resources(adapter->rx_ring[i]);
@@ -7235,11 +7247,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
 				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
 		}
-	}

-	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
-		netdev->vlan_features |= NETIF_F_FCOE_CRC;
-		netdev->vlan_features |= NETIF_F_FSO;
-		netdev->vlan_features |= NETIF_F_FCOE_MTU;
+
+		adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
+
+		netdev->vlan_features |= NETIF_F_FSO |
+					 NETIF_F_FCOE_CRC |
+					 NETIF_F_FCOE_MTU;
 	}
 #endif /* IXGBE_FCOE */
 	if (pci_using_dac) {
@@ -7436,12 +7449,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 	ixgbe_sysfs_exit(adapter);
 #endif /* CONFIG_IXGBE_HWMON */

-#ifdef IXGBE_FCOE
-	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-		ixgbe_cleanup_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
 	/* remove the added san mac */
 	ixgbe_del_sanmac_netdev(netdev);
...