Commit 1f1c2881 authored by Linus Torvalds

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (37 commits)
  forcedeth bug fix: realtek phy
  forcedeth bug fix: vitesse phy
  forcedeth bug fix: cicada phy
  atl1: reorder atl1_main functions
  atl1: fix excessively indented code
  atl1: cleanup atl1_main
  atl1: header file cleanup
  atl1: remove irq_sem
  cdc-subset to support new vendor/product ID
  8139cp: implement the missing dev->tx_timeout
  myri10ge: Remove nonsensical limit in the tx done routine
  gianfar: kill unused header
  EP93XX_ETH must select MII
  macb: Add multicast capability
  macb: Use generic PHY layer
  s390: add barriers to qeth driver
  s390: scatter-gather for inbound traffic in qeth driver
  eHEA: Introducing support for DLPAR memory add
  Fix a potential NULL pointer dereference in free_shared_mem() in drivers/net/s2io.c
  [PATCH] softmac: Fix ESSID problem
  ...
parents 7608a864 c5e3ae88
@@ -26,7 +26,6 @@
TODO:
* Test Tx checksumming thoroughly
* Implement dev->tx_timeout
Low priority TODO:
* Complete reset on PciErr
@@ -1218,6 +1217,30 @@ static int cp_close (struct net_device *dev)
return 0;
}
static void cp_tx_timeout(struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
int rc;
printk(KERN_WARNING "%s: Transmit timeout, status %2x %4x %4x %4x\n",
dev->name, cpr8(Cmd), cpr16(CpCmd),
cpr16(IntrStatus), cpr16(IntrMask));
spin_lock_irqsave(&cp->lock, flags);
cp_stop_hw(cp);
cp_clean_rings(cp);
rc = cp_init_rings(cp);
cp_start_hw(cp);
netif_wake_queue(dev);
spin_unlock_irqrestore(&cp->lock, flags);
return;
}
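For orientation: the driver never calls this handler itself. The netdev core's watchdog invokes dev->tx_timeout once the queue has been stopped for longer than dev->watchdog_timeo jiffies, which is why a later hunk in this diff also removes the #if 0 around the registration. A schematic sketch of that contract (illustrative only, not the verbatim core code in net/sched/sch_generic.c):
/* Roughly what the generic watchdog does for a driver that sets
 * dev->tx_timeout and dev->watchdog_timeo (schematic sketch only). */
static void dev_watchdog_sketch(struct net_device *dev)
{
	if (netif_queue_stopped(dev) &&
	    time_after(jiffies, dev->trans_start + dev->watchdog_timeo))
		dev->tx_timeout(dev);		/* here: cp_tx_timeout() */
}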
#ifdef BROKEN
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
@@ -1920,10 +1943,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
dev->change_mtu = cp_change_mtu;
#endif
dev->ethtool_ops = &cp_ethtool_ops;
#if 0
dev->tx_timeout = cp_tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
#endif
#if CP_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
@@ -205,7 +205,7 @@ config MII
config MACB
tristate "Atmel MACB support"
depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263
select MII
select PHYLIB
help
The Atmel MACB ethernet interface is found on many AT32 and AT91
parts. Say Y to include support for the MACB chip.
@@ -43,6 +43,7 @@ config ARM_AT91_ETHER
config EP93XX_ETH
tristate "EP93xx Ethernet support"
depends on ARM && ARCH_EP93XX
select MII
help
This is a driver for the ethernet hardware included in EP93xx CPUs.
Say Y if you are building a kernel for EP93xx based devices.
@@ -43,6 +43,7 @@ extern const struct ethtool_ops atl1_ethtool_ops;
struct atl1_adapter;
#define ATL1_MAX_INTR 3
#define ATL1_MAX_TX_BUF_LEN 0x3000 /* 12288 bytes */
#define ATL1_DEFAULT_TPD 256
#define ATL1_MAX_TPD 1024
@@ -57,29 +58,45 @@ struct atl1_adapter;
#define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc)
/*
* This detached comment is preserved for documentation purposes only.
* It was originally attached to some code that got deleted, but seems
* important enough to keep around...
*
* <begin detached comment>
* Some workarounds require millisecond delays and are run during interrupt
* context. Most notably, when establishing link, the phy may need tweaking
* but cannot process phy register reads/writes faster than millisecond
* intervals...and we establish link due to a "link status change" interrupt.
* <end detached comment>
*/
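A hedged sketch of the deferral pattern this comment motivates: millisecond phy delays are not allowed in hard-irq context, so the interrupt handler only schedules work and the sleepable phy pokes run later in process context. The function names here are illustrative, but the adapter struct further down in this diff does carry a link_chg_task work_struct for exactly this purpose.
/* Illustrative only: push sleepy phy work out of the hard interrupt. */
static irqreturn_t atl1_intr_sketch(int irq, void *dev_id)
{
	struct atl1_adapter *adapter = dev_id;
	/* ... read and acknowledge the "link status change" cause ... */
	schedule_work(&adapter->link_chg_task);	/* runs in process context */
	return IRQ_HANDLED;
}
static void atl1_link_chg_sketch(struct work_struct *work)
{
	/* process context: phy register accesses may now be paced with
	 * msleep() at the millisecond intervals the phy requires */
}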
/*
* atl1_ring_header represents a single, contiguous block of DMA space
* mapped for the three descriptor rings (tpd, rfd, rrd) and the two
* message blocks (cmb, smb) described below
*/
struct atl1_ring_header {
void *desc; /* virtual address */
dma_addr_t dma; /* physical address */
unsigned int size; /* length in bytes */
};
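A minimal sketch of how a header like this is typically filled: one pci_alloc_consistent() call sized for every ring plus the two message blocks, then carved up by offset. The helper name and the exact size arithmetic are illustrative, and the tpd/rfd/rrd field names in the adapter are assumed from the ring structs below:
static int atl1_alloc_rings_sketch(struct atl1_adapter *adapter)
{
	struct atl1_ring_header *rh = &adapter->ring_header;
	/* one contiguous, coherent block for tpd + rfd + rrd + cmb + smb */
	rh->size = adapter->tpd_ring.count * sizeof(struct tx_packet_desc) +
		   adapter->rfd_ring.count * sizeof(struct rx_free_desc) +
		   adapter->rrd_ring.count * sizeof(struct rx_return_desc) +
		   sizeof(struct coals_msg_block) +
		   sizeof(struct stats_msg_block);
	rh->desc = pci_alloc_consistent(adapter->pdev, rh->size, &rh->dma);
	if (!rh->desc)
		return -ENOMEM;
	/* the first ring starts at the head of the block ... */
	adapter->tpd_ring.desc = rh->desc;
	adapter->tpd_ring.dma = rh->dma;
	/* ... rfd, rrd, cmb and smb follow at successive offsets */
	return 0;
}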
/*
* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
* atl1_buffer is wrapper around a pointer to a socket buffer
* so a DMA handle can be stored along with the skb
*/
struct atl1_buffer {
struct sk_buff *skb;
u16 length;
u16 alloced;
struct sk_buff *skb; /* socket buffer */
u16 length; /* rx buffer length */
u16 alloced; /* 1 if skb allocated */
dma_addr_t dma;
};
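A sketch of the bookkeeping this wrapper enables when posting a receive buffer; the helper is hypothetical, but the field usage follows the comments above: store the DMA handle at map time so the completion path can unmap without any lookup.
static int atl1_post_rx_buffer_sketch(struct atl1_adapter *adapter,
				      struct atl1_buffer *buffer_info,
				      u16 len)
{
	buffer_info->skb = netdev_alloc_skb(adapter->netdev, len);
	if (!buffer_info->skb)
		return -ENOMEM;
	buffer_info->alloced = 1;	/* skb is allocated and owned here */
	buffer_info->length = len;	/* rx buffer length */
	buffer_info->dma = pci_map_single(adapter->pdev,
					  buffer_info->skb->data, len,
					  PCI_DMA_FROMDEVICE);
	return 0;
}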
#define MAX_TX_BUF_LEN 0x3000 /* 12KB */
/* transmit packet descriptor (tpd) ring */
struct atl1_tpd_ring {
void *desc; /* pointer to the descriptor ring memory */
dma_addr_t dma; /* physical address of the descriptor ring */
u16 size; /* length of descriptor ring in bytes */
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
u16 size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
u16 hw_idx; /* hardware index */
atomic_t next_to_clean;
@@ -87,36 +104,34 @@ struct atl1_tpd_ring {
struct atl1_buffer *buffer_info;
};
/* receive free descriptor (rfd) ring */
struct atl1_rfd_ring {
void *desc;
dma_addr_t dma;
u16 size;
u16 count;
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
u16 size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
atomic_t next_to_use;
u16 next_to_clean;
struct atl1_buffer *buffer_info;
};
/* receive return descriptor (rrd) ring */
struct atl1_rrd_ring {
void *desc;
dma_addr_t dma;
unsigned int size;
u16 count;
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
unsigned int size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
u16 next_to_use;
atomic_t next_to_clean;
};
struct atl1_ring_header {
void *desc; /* pointer to the descriptor ring memory */
dma_addr_t dma; /* physical address of the descriptor ring */
unsigned int size; /* length of descriptor ring in bytes */
};
/* coalescing message block (cmb) */
struct atl1_cmb {
struct coals_msg_block *cmb;
dma_addr_t dma;
};
/* statistics message block (smb) */
struct atl1_smb {
struct stats_msg_block *smb;
dma_addr_t dma;
@@ -141,24 +156,26 @@ struct atl1_sft_stats {
u64 tx_aborted_errors;
u64 tx_window_errors;
u64 tx_carrier_errors;
u64 tx_pause; /* num Pause packet transmitted. */
u64 excecol; /* num tx packets aborted due to excessive collisions. */
u64 deffer; /* num deferred tx packets */
u64 scc; /* num packets subsequently transmitted successfully w/ single prior collision. */
u64 mcc; /* num packets subsequently transmitted successfully w/ multiple prior collisions. */
u64 tx_pause; /* num pause packets transmitted. */
u64 excecol; /* num tx packets w/ excessive collisions. */
u64 deffer; /* num tx packets deferred */
u64 scc; /* num packets subsequently transmitted
* successfully w/ single prior collision. */
u64 mcc; /* num packets subsequently transmitted
* successfully w/ multiple prior collisions. */
u64 latecol; /* num tx packets w/ late collisions. */
u64 tx_underun; /* num tx packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */
u64 tx_trunc; /* num tx packets truncated due to size exceeding MTU, regardless whether truncated by Selene or not. (The name doesn't really reflect the meaning in this case.) */
u64 tx_underun; /* num tx packets aborted due to transmit
* FIFO underrun, or TRD FIFO underrun */
u64 tx_trunc; /* num tx packets truncated due to size
* exceeding MTU, regardless whether truncated
* by the chip or not. (The name doesn't really
* reflect the meaning in this case.) */
u64 rx_pause; /* num Pause packets received. */
u64 rx_rrd_ov;
u64 rx_trunc;
};
/* board specific private data structure */
#define ATL1_REGS_LEN 8
/* Structure containing variables used by the shared code */
/* hardware structure */
struct atl1_hw {
u8 __iomem *hw_addr;
struct atl1_adapter *back;
@@ -167,24 +184,35 @@ struct atl1_hw {
enum atl1_dma_req_block dmar_block;
enum atl1_dma_req_block dmaw_block;
u8 preamble_len;
u8 max_retry; /* Retransmission maximum, after which the packet will be discarded */
u8 jam_ipg; /* IPG to start JAM for collision based flow control in half-duplex mode. In units of 8-bit time */
u8 ipgt; /* Desired back to back inter-packet gap. The default is 96-bit time */
u8 min_ifg; /* Minimum number of IFG to enforce in between RX frames. Frame gap below such IFP is dropped */
u8 max_retry; /* Retransmission maximum, after which the
* packet will be discarded */
u8 jam_ipg; /* IPG to start JAM for collision based flow
* control in half-duplex mode. In units of
* 8-bit time */
u8 ipgt; /* Desired back to back inter-packet gap.
* The default is 96-bit time */
u8 min_ifg; /* Minimum number of IFG to enforce in between
* receive frames. Frame gap below such IFG
* is dropped */
u8 ipgr1; /* 64bit Carrier-Sense window */
u8 ipgr2; /* 96-bit IPG window */
u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. Each TPD is 16 bytes long */
u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned burst. Each RFD is 12 bytes long */
u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned
* burst. Each TPD is 16 bytes long */
u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned
* burst. Each RFD is 12 bytes long */
u8 rfd_fetch_gap;
u8 rrd_burst; /* Threshold number of RRDs that can be retired in a burst. Each RRD is 16 bytes long */
u8 rrd_burst; /* Threshold number of RRDs that can be retired
* in a burst. Each RRD is 16 bytes long */
u8 tpd_fetch_th;
u8 tpd_fetch_gap;
u16 tx_jumbo_task_th;
u16 txf_burst; /* Number of data bytes to read in a cache-aligned burst. Each SRAM entry is
8 bytes long */
u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN packets should add 4 bytes */
u16 txf_burst; /* Number of data bytes to read in a cache-
* aligned burst. Each SRAM entry is 8 bytes */
u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN
* packets should add 4 bytes */
u16 rx_jumbo_lkah;
u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after every 512ns passes. */
u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after
* every 512ns passes. */
u16 lcol; /* Collision Window */
u16 cmb_tpd;
@@ -194,48 +222,35 @@ struct atl1_hw {
u32 smb_timer;
u16 media_type;
u16 autoneg_advertised;
u16 pci_cmd_word;
u16 mii_autoneg_adv_reg;
u16 mii_1000t_ctrl_reg;
u32 mem_rang;
u32 txcw;
u32 max_frame_size;
u32 min_frame_size;
u32 mc_filter_type;
u32 num_mc_addrs;
u32 collision_delta;
u32 tx_packet_delta;
u16 phy_spd_default;
u16 dev_rev;
/* spi flash */
u8 flash_vendor;
u8 dma_fairness;
u8 mac_addr[ETH_ALEN];
u8 perm_mac_addr[ETH_ALEN];
/* bool phy_preamble_sup; */
bool phy_configured;
};
struct atl1_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
struct net_device_stats net_stats;
struct atl1_sft_stats soft_stats;
struct vlan_group *vlgrp;
u32 rx_buffer_len;
u32 wol;
u16 link_speed;
u16 link_duplex;
spinlock_t lock;
atomic_t irq_sem;
struct work_struct tx_timeout_task;
struct work_struct link_chg_task;
struct work_struct pcie_dma_to_rst_task;
@@ -243,9 +258,7 @@ struct atl1_adapter {
struct timer_list phy_config_timer;
bool phy_timer_pending;
bool mac_disabled;
/* All descriptor rings' memory */
/* all descriptor rings' memory */
struct atl1_ring_header ring_header;
/* TX */
@@ -258,25 +271,16 @@ struct atl1_adapter {
u64 hw_csum_err;
u64 hw_csum_good;
u32 gorcl;
u64 gorcl_old;
/* Interrupt Moderator timer ( 2us resolution) */
u16 imt;
/* Interrupt Clear timer (2us resolution) */
u16 ict;
/* MII interface info */
struct mii_if_info mii;
u16 imt; /* interrupt moderator timer (2us resolution) */
u16 ict; /* interrupt clear timer (2us resolution) */
struct mii_if_info mii; /* MII interface info */
/* structs defined in atl1_hw.h */
u32 bd_number; /* board number */
u32 bd_number; /* board number */
bool pci_using_64;
struct atl1_hw hw;
struct atl1_smb smb;
struct atl1_cmb cmb;
u32 pci_state[16];
};
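Since the imt and ict fields above tick in 2us units, programming them from a requested interval in microseconds is a single scaling step; a trivial hedged sketch (helper name illustrative):
/* e.g. a 200us moderation interval becomes 100 ticks of 2us each */
static void atl1_set_timers_sketch(struct atl1_adapter *adapter,
				   unsigned int moderation_us,
				   unsigned int clear_us)
{
	adapter->imt = moderation_us / 2;	/* 2us per tick */
	adapter->ict = clear_us / 2;		/* 2us per tick */
}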
#endif /* _ATL1_H_ */
@@ -39,13 +39,13 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
#define DRV_VERSION "EHEA_0067"
#define DRV_VERSION "EHEA_0070"
/* EHEA capability flags */
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
#define DLPAR_MEM_ADD 2
#define DLPAR_MEM_REM 4
#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM)
#define DLPAR_MEM_ADD 2
#define DLPAR_MEM_REM 4
#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM)
#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -113,6 +113,8 @@
/* Memory Regions */
#define EHEA_MR_ACC_CTRL 0x00800000
#define EHEA_BUSMAP_START 0x8000000000000000ULL
#define EHEA_WATCH_DOG_TIMEOUT 10*HZ
/* utility functions */
@@ -186,6 +188,12 @@ struct h_epas {
set to 0 if unused */
};
struct ehea_busmap {
unsigned int entries; /* total number of entries */
unsigned int valid_sections; /* number of valid sections */
u64 *vaddr;
};
struct ehea_qp;
struct ehea_cq;
struct ehea_eq;
@@ -382,6 +390,8 @@ struct ehea_adapter {
struct ehea_mr mr;
u32 pd; /* protection domain */
u64 max_mc_mac; /* max number of multicast mac addresses */
int active_ports;
struct list_head list;
};
@@ -431,6 +441,9 @@ struct port_res_cfg {
int max_entries_rq3;
};
enum ehea_flag_bits {
__EHEA_STOP_XFER
};
void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port);
@@ -79,6 +79,11 @@ MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 1 ");
static int port_name_cnt = 0;
static LIST_HEAD(adapter_list);
u64 ehea_driver_flags = 0;
struct workqueue_struct *ehea_driver_wq;
struct work_struct ehea_rereg_mr_task;
static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
const struct of_device_id *id);
@@ -238,13 +243,17 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
| EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
rwqe->sg_list[0].vaddr = (u64)skb->data;
rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
rwqe->sg_list[0].len = packet_size;
rwqe->data_segments = 1;
index++;
index &= max_index_mask;
if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)))
goto out;
}
q_skba->index = index;
/* Ring doorbell */
@@ -253,7 +262,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
ehea_update_rq2a(pr->qp, i);
else
ehea_update_rq3a(pr->qp, i);
out:
return ret;
}
@@ -1321,7 +1330,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
sg1entry->len = skb_data_size - headersize;
tmp_addr = (u64)(skb->data + headersize);
sg1entry->vaddr = tmp_addr;
sg1entry->vaddr = ehea_map_vaddr(tmp_addr);
swqe->descriptors++;
}
} else
@@ -1352,7 +1361,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb,
sg1entry->l_key = lkey;
sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM);
sg1entry->vaddr = tmp_addr;
sg1entry->vaddr = ehea_map_vaddr(tmp_addr);
swqe->descriptors++;
}
} else {
@@ -1391,7 +1400,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
sg1entry->len = frag->size;
tmp_addr = (u64)(page_address(frag->page)
+ frag->page_offset);
sg1entry->vaddr = tmp_addr;
sg1entry->vaddr = ehea_map_vaddr(tmp_addr);
swqe->descriptors++;
sg1entry_contains_frag_data = 1;
}
@@ -1406,7 +1415,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
tmp_addr = (u64)(page_address(frag->page)
+ frag->page_offset);
sgentry->vaddr = tmp_addr;
sgentry->vaddr = ehea_map_vaddr(tmp_addr);
swqe->descriptors++;
}
}
@@ -1878,6 +1887,9 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
ehea_dump(swqe, 512, "swqe");
}
if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)))
goto out;
ehea_post_swqe(pr->qp, swqe);
pr->tx_packets++;
@@ -1892,7 +1904,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
dev->trans_start = jiffies;
spin_unlock(&pr->xmit_lock);
out:
return NETDEV_TX_OK;
}
@@ -2220,6 +2232,9 @@ static int ehea_up(struct net_device *dev)
out_clean_pr:
ehea_clean_all_portres(port);
out:
if (ret)
ehea_info("Failed starting %s. ret=%i", dev->name, ret);
return ret;
}
@@ -2259,8 +2274,13 @@ static int ehea_down(struct net_device *dev)
msleep(1);
ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
ret = ehea_clean_all_portres(port);
port->state = EHEA_PORT_DOWN;
ret = ehea_clean_all_portres(port);
if (ret)
ehea_info("Failed freeing resources for %s. ret=%i",
dev->name, ret);
return ret;
}
@@ -2292,15 +2312,11 @@ static void ehea_reset_port(struct work_struct *work)
netif_stop_queue(dev);
netif_poll_disable(dev);
ret = ehea_down(dev);
if (ret)
ehea_error("ehea_down failed. not all resources are freed");
ehea_down(dev);
ret = ehea_up(dev);
if (ret) {
ehea_error("Reset device %s failed: ret=%d", dev->name, ret);
if (ret)
goto out;
}
if (netif_msg_timer(port))
ehea_info("Device %s resetted successfully", dev->name);
@@ -2312,6 +2328,88 @@ static void ehea_reset_port(struct work_struct *work)
return;
}
static void ehea_rereg_mrs(struct work_struct *work)
{
int ret, i;
struct ehea_adapter *adapter;
ehea_info("LPAR memory enlarged - re-initializing driver");
list_for_each_entry(adapter, &adapter_list, list)
if (adapter->active_ports) {
/* Shutdown all ports */
for (i = 0; i < EHEA_MAX_PORTS; i++) {
struct ehea_port *port = adapter->port[i];
if (port) {
struct net_device *dev = port->netdev;
if (dev->flags & IFF_UP) {
ehea_info("stopping %s",
dev->name);
down(&port->port_lock);
netif_stop_queue(dev);
netif_poll_disable(dev);
ehea_down(dev);
up(&port->port_lock);
}
}
}
/* Unregister old memory region */
ret = ehea_rem_mr(&adapter->mr);
if (ret) {
ehea_error("unregister MR failed - driver"
" inoperable!");
goto out;
}
}
ehea_destroy_busmap();
ret = ehea_create_busmap();
if (ret)
goto out;
clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
list_for_each_entry(adapter, &adapter_list, list)
if (adapter->active_ports) {
/* Register new memory region */
ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
if (ret) {
ehea_error("register MR failed - driver"
" inoperable!");
goto out;
}
/* Restart all ports */
for (i = 0; i < EHEA_MAX_PORTS; i++) {
struct ehea_port *port = adapter->port[i];
if (port) {
struct net_device *dev = port->netdev;
if (dev->flags & IFF_UP) {
ehea_info("restarting %s",
dev->name);
down(&port->port_lock);
ret = ehea_up(dev);
if (!ret) {
netif_poll_enable(dev);
netif_wake_queue(dev);
}
up(&port->port_lock);
}
}
}
}
out:
return;
}
static void ehea_tx_watchdog(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
@@ -2573,6 +2671,8 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
ehea_info("%s: Jumbo frames are %sabled", dev->name,
jumbo == 1 ? "en" : "dis");
adapter->active_ports++;
return port;
out_unreg_port:
@@ -2596,6 +2696,7 @@ static void ehea_shutdown_single_port(struct ehea_port *port)
ehea_unregister_port(port);
kfree(port->mc_list);
free_netdev(port->netdev);
port->adapter->active_ports--;
}
static int ehea_setup_ports(struct ehea_adapter *adapter)
@@ -2788,6 +2889,8 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
goto out;
}
list_add(&adapter->list, &adapter_list);
adapter->ebus_dev = dev;
adapter_handle = of_get_property(dev->ofdev.node, "ibm,hea-handle",
@@ -2891,7 +2994,10 @@ static int __devexit ehea_remove(struct ibmebus_dev *dev)
ehea_destroy_eq(adapter->neq);
ehea_remove_adapter_mr(adapter);
list_del(&adapter->list);
kfree(adapter);
return 0;
}
@@ -2939,9 +3045,18 @@ int __init ehea_module_init(void)
printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
DRV_VERSION);
ehea_driver_wq = create_workqueue("ehea_driver_wq");
INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
ret = check_module_parm();
if (ret)
goto out;
ret = ehea_create_busmap();
if (ret)
goto out;
ret = ibmebus_register_driver(&ehea_driver);
if (ret) {
ehea_error("failed registering eHEA device driver on ebus");
@@ -2965,6 +3080,7 @@ static void __exit ehea_module_exit(void)
{
driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
ibmebus_unregister_driver(&ehea_driver);
ehea_destroy_busmap();
}
module_init(ehea_module_init);
@@ -60,6 +60,9 @@ static inline u32 get_longbusy_msecs(int long_busy_ret_code)
}
}
/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */
#define EHEA_MAX_RPAGE 512
/* Notification Event Queue (NEQ) Entry bit masks */
#define NEQE_EVENT_CODE EHEA_BMASK_IBM(2, 7)
#define NEQE_PORTNUM EHEA_BMASK_IBM(32, 47)
@@ -31,6 +31,13 @@
#include "ehea_phyp.h"
#include "ehea_qmr.h"
struct ehea_busmap ehea_bmap = { 0, 0, NULL };
extern u64 ehea_driver_flags;
extern struct workqueue_struct *ehea_driver_wq;
extern struct work_struct ehea_rereg_mr_task;
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
void *retvalue = hw_qeit_get(queue);
@@ -547,18 +554,84 @@ int ehea_destroy_qp(struct ehea_qp *qp)
return 0;
}
int ehea_create_busmap( void )
{
u64 vaddr = EHEA_BUSMAP_START;
unsigned long abs_max_pfn = 0;
unsigned long sec_max_pfn;
int i;
/*
* Sections are not in ascending order -> Loop over all sections and
* find the highest PFN to compute the required map size.
*/
ehea_bmap.valid_sections = 0;
for (i = 0; i < NR_MEM_SECTIONS; i++)
if (valid_section_nr(i)) {
sec_max_pfn = section_nr_to_pfn(i);
if (sec_max_pfn > abs_max_pfn)
abs_max_pfn = sec_max_pfn;
ehea_bmap.valid_sections++;
}
ehea_bmap.entries = abs_max_pfn / EHEA_PAGES_PER_SECTION + 1;
ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));
if (!ehea_bmap.vaddr)
return -ENOMEM;
for (i = 0 ; i < ehea_bmap.entries; i++) {
unsigned long pfn = section_nr_to_pfn(i);
if (pfn_valid(pfn)) {
ehea_bmap.vaddr[i] = vaddr;
vaddr += EHEA_SECTSIZE;
} else
ehea_bmap.vaddr[i] = 0;
}
return 0;
}
void ehea_destroy_busmap( void )
{
vfree(ehea_bmap.vaddr);
}
u64 ehea_map_vaddr(void *caddr)
{
u64 mapped_addr;
unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS;
if (likely(index < ehea_bmap.entries)) {
mapped_addr = ehea_bmap.vaddr[index];
if (likely(mapped_addr))
mapped_addr |= (((unsigned long)caddr)
& (EHEA_SECTSIZE - 1));
else
mapped_addr = -1;
} else
mapped_addr = -1;
if (unlikely(mapped_addr == -1))
if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
queue_work(ehea_driver_wq, &ehea_rereg_mr_task);
return mapped_addr;
}
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
int i, k, ret;
u64 hret, pt_abs, start, end, nr_pages;
u32 acc_ctrl = EHEA_MR_ACC_CTRL;
int ret;
u64 *pt;
void *pg;
u64 hret, pt_abs, i, j, m, mr_len;
u32 acc_ctrl = EHEA_MR_ACC_CTRL;
start = KERNELBASE;
end = (u64)high_memory;
nr_pages = (end - start) / EHEA_PAGESIZE;
mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;
pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
pt = kzalloc(EHEA_MAX_RPAGE * sizeof(u64), GFP_KERNEL);
if (!pt) {
ehea_error("no mem");
ret = -ENOMEM;
@@ -566,7 +639,8 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
}
pt_abs = virt_to_abs(pt);
hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start,
hret = ehea_h_alloc_resource_mr(adapter->handle,
EHEA_BUSMAP_START, mr_len,
acc_ctrl, adapter->pd,
&mr->handle, &mr->lkey);
if (hret != H_SUCCESS) {
@@ -575,49 +649,43 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
goto out;
}
mr->vaddr = KERNELBASE;
k = 0;
while (nr_pages > 0) {
if (nr_pages > 1) {
u64 num_pages = min(nr_pages, (u64)512);
for (i = 0; i < num_pages; i++)
pt[i] = virt_to_abs((void*)(((u64)start) +
((k++) *
EHEA_PAGESIZE)));
hret = ehea_h_register_rpage_mr(adapter->handle,
mr->handle, 0,
0, (u64)pt_abs,
num_pages);
nr_pages -= num_pages;
} else {
u64 abs_adr = virt_to_abs((void*)(((u64)start) +
(k * EHEA_PAGESIZE)));
hret = ehea_h_register_rpage_mr(adapter->handle,
mr->handle, 0,
0, abs_adr,1);
nr_pages--;
}
if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) {
ehea_h_free_resource(adapter->handle,
mr->handle, FORCE_FREE);
ehea_error("register_rpage_mr failed");
ret = -EIO;
goto out;
for (i = 0 ; i < ehea_bmap.entries; i++)
if (ehea_bmap.vaddr[i]) {
void *sectbase = __va(i << SECTION_SIZE_BITS);
unsigned long k = 0;
for (j = 0; j < (PAGES_PER_SECTION / EHEA_MAX_RPAGE);
j++) {
for (m = 0; m < EHEA_MAX_RPAGE; m++) {
pg = sectbase + ((k++) * EHEA_PAGESIZE);
pt[m] = virt_to_abs(pg);
}
hret = ehea_h_register_rpage_mr(adapter->handle,
mr->handle,
0, 0, pt_abs,
EHEA_MAX_RPAGE);
if ((hret != H_SUCCESS)
&& (hret != H_PAGE_REGISTERED)) {
ehea_h_free_resource(adapter->handle,
mr->handle,
FORCE_FREE);
ehea_error("register_rpage_mr failed");
ret = -EIO;
goto out;
}
}
}
}
if (hret != H_SUCCESS) {
ehea_h_free_resource(adapter->handle, mr->handle,
FORCE_FREE);
ehea_error("register_rpage failed for last page");
ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
ehea_error("registering mr failed");
ret = -EIO;
goto out;
}
mr->vaddr = EHEA_BUSMAP_START;
mr->adapter = adapter;
ret = 0;
out:
@@ -36,8 +36,14 @@
* page size of ehea hardware queues
*/
#define EHEA_PAGESHIFT 12
#define EHEA_PAGESIZE 4096UL
#define EHEA_PAGESHIFT 12
#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT)
#define EHEA_SECTSIZE (1UL << 24)
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> PAGE_SHIFT)
#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
#error eHEA module can't work if kernel sectionsize < ehea sectionsize
#endif
/* Some abbreviations used here:
*
@@ -372,4 +378,8 @@ int ehea_rem_mr(struct ehea_mr *mr);
void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
int ehea_create_busmap( void );
void ehea_destroy_busmap( void );
u64 ehea_map_vaddr(void *caddr);
#endif /* __EHEA_QMR_H__ */
@@ -550,6 +550,8 @@ union ring_type {
/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
#define PHY_OUI_VITESSE 0x01c1
#define PHY_OUI_REALTEK 0x0732
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
@@ -557,12 +559,36 @@
#define PHYID2_MODEL_MASK 0x03f0
#define PHY_MODEL_MARVELL_E3016 0x220
#define PHY_MARVELL_E3016_INITMASK 0x0300
#define PHY_INIT1 0x0f000
#define PHY_INIT2 0x0e00
#define PHY_INIT3 0x01000
#define PHY_INIT4 0x0200
#define PHY_INIT5 0x0004
#define PHY_INIT6 0x02000
#define PHY_CICADA_INIT1 0x0f000
#define PHY_CICADA_INIT2 0x0e00
#define PHY_CICADA_INIT3 0x01000
#define PHY_CICADA_INIT4 0x0200
#define PHY_CICADA_INIT5 0x0004
#define PHY_CICADA_INIT6 0x02000
#define PHY_VITESSE_INIT_REG1 0x1f
#define PHY_VITESSE_INIT_REG2 0x10
#define PHY_VITESSE_INIT_REG3 0x11
#define PHY_VITESSE_INIT_REG4 0x12
#define PHY_VITESSE_INIT_MSK1 0xc
#define PHY_VITESSE_INIT_MSK2 0x0180
#define PHY_VITESSE_INIT1 0x52b5
#define PHY_VITESSE_INIT2 0xaf8a
#define PHY_VITESSE_INIT3 0x8
#define PHY_VITESSE_INIT4 0x8f8a
#define PHY_VITESSE_INIT5 0xaf86
#define PHY_VITESSE_INIT6 0x8f86
#define PHY_VITESSE_INIT7 0xaf82
#define PHY_VITESSE_INIT8 0x0100
#define PHY_VITESSE_INIT9 0x8f82
#define PHY_VITESSE_INIT10 0x0
#define PHY_REALTEK_INIT_REG1 0x1f
#define PHY_REALTEK_INIT_REG2 0x19
#define PHY_REALTEK_INIT_REG3 0x13
#define PHY_REALTEK_INIT1 0x0000
#define PHY_REALTEK_INIT2 0x8e00
#define PHY_REALTEK_INIT3 0x0001
#define PHY_REALTEK_INIT4 0xad17
#define PHY_GIGABIT 0x0100
#define PHY_TIMEOUT 0x1
@@ -1096,6 +1122,28 @@ static int phy_init(struct net_device *dev)
return PHY_ERROR;
}
}
if (np->phy_oui == PHY_OUI_REALTEK) {
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
}
/* set advertise register */
reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
@@ -1141,14 +1189,14 @@ static int phy_init(struct net_device *dev)
/* phy vendor specific configuration */
if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
phy_reserved |= (PHY_INIT3 | PHY_INIT4);
phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
phy_reserved |= PHY_INIT5;
phy_reserved |= PHY_CICADA_INIT5;
if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
@@ -1156,12 +1204,106 @@ static int phy_init(struct net_device *dev)
}
if (np->phy_oui == PHY_OUI_CICADA) {
phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
phy_reserved |= PHY_INIT6;
phy_reserved |= PHY_CICADA_INIT6;
if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
}
if (np->phy_oui == PHY_OUI_VITESSE) {
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
phy_reserved |= PHY_VITESSE_INIT3;
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
phy_reserved |= PHY_VITESSE_INIT3;
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
phy_reserved |= PHY_VITESSE_INIT8;
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
}
if (np->phy_oui == PHY_OUI_REALTEK) {
/* reset could have cleared these out, set them back */
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
}
/* some phys clear out pause advertisement on reset, set it back */
mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
@@ -31,7 +31,6 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/ocp.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
@@ -383,11 +383,11 @@ struct macb {
unsigned int rx_pending, tx_pending;
struct delayed_work periodic_task;
struct mutex mdio_mutex;
struct completion mdio_complete;
struct mii_if_info mii;
struct mii_bus mii_bus;
struct phy_device *phy_dev;
unsigned int link;
unsigned int speed;
unsigned int duplex;
};
#endif /* _MACB_H */
@@ -1060,7 +1060,6 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index)
struct myri10ge_tx_buf *tx = &mgp->tx;
struct sk_buff *skb;
int idx, len;
int limit = 0;
while (tx->pkt_done != mcp_index) {
idx = tx->done & tx->mask;
@@ -1091,11 +1090,6 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index)
bus), len,
PCI_DMA_TODEVICE);
}
/* limit potential for livelock by only handling
* 2 full tx rings per call */
if (unlikely(++limit > 2 * tx->mask))
break;
}
/* start the queue if we've stopped it */
if (netif_queue_stopped(mgp->dev)
@@ -796,12 +796,14 @@ static void free_shared_mem(struct s2io_nic *nic)
struct mac_info *mac_control;
struct config_param *config;
int lst_size, lst_per_page;
struct net_device *dev = nic->dev;
struct net_device *dev;
int page_num = 0;
if (!nic)
return;
dev = nic->dev;
mac_control = &nic->mac_control;
config = &nic->config;