Commit 22f87883 authored by David S. Miller

Merge branch 'liquidio-next'

Raghu Vatsavayi says:

====================
liquidio updates and bug fixes

This V2 patchset contains updates and bug fixes for the
liquidio driver. It also includes the changes you suggested
for the vxlan code. Please apply the patches in the order
given, as some of them depend on earlier patches in the
series.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 13c5c240 9fbc48f6
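For context, the core VXLAN change in this series hooks the kernel's UDP-tunnel
notifications into the driver so the firmware learns which UDP ports carry VXLAN
traffic. Below is a condensed sketch of that pattern, assembled from the hunks
further down; the control-packet plumbing in liquidio_vxlan_port_command is
elided, so treat this as an illustrative excerpt rather than an applied hunk.

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

/* Invoked by the core when a VXLAN socket is opened; the driver pushes the
 * UDP port down to firmware so tunneled traffic can be recognized and its
 * inner checksums verified (liquidio_vxlan_port_command, defined in the
 * patch, builds and sends the actual OCTNET control packet).
 */
static void liquidio_add_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_ADD);
}

static struct net_device_ops lionetdevops = {
	/* ... existing ndo entries ... */
	.ndo_udp_tunnel_add = liquidio_add_vxlan_port,
	.ndo_udp_tunnel_del = liquidio_del_vxlan_port,
};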
......@@ -19,26 +19,16 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
......@@ -74,9 +64,9 @@ void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
u32 val;
pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
if (val & 0x000f0000) {
if (val & 0x000c0000) {
dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
val & 0x000f0000);
val & 0x000c0000);
}
val |= 0xf; /* Enable Link error reporting */
......@@ -229,7 +219,7 @@ void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
/* / Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);
/* / Select ES,RO,NS setting from register for Output Queue Packet
/* Select ES, RO, NS setting from register for Output Queue Packet
* Address
*/
octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);
......@@ -547,14 +537,14 @@ static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
}
void
static void
lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
{
dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
CVM_CAST64(intr64));
}
int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
{
struct octeon_droq *droq;
int oq_no;
......@@ -579,7 +569,7 @@ int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
continue;
droq = oct->droq[oq_no];
pkt_count = octeon_droq_check_hw_for_pkts(oct, droq);
pkt_count = octeon_droq_check_hw_for_pkts(droq);
if (pkt_count) {
oct->droq_intr |= (1ULL << oq_no);
if (droq->ops.poll_mode) {
......
......@@ -82,8 +82,6 @@ void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
void lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
void lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64);
int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct);
irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev);
void lio_cn6xxx_reinit_regs(struct octeon_device *oct);
void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
......
......@@ -19,28 +19,17 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
{
......@@ -129,7 +118,7 @@ static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct)
pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val);
}
int lio_is_210nv(struct octeon_device *oct)
static int lio_is_210nv(struct octeon_device *oct)
{
u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG);
......
......@@ -28,6 +28,5 @@
#define __CN68XX_DEVICE_H__
int lio_setup_cn68xx_octeon_device(struct octeon_device *oct);
int lio_is_210nv(struct octeon_device *oct);
#endif
......@@ -29,7 +29,6 @@
#ifndef __CN68XX_REGS_H__
#define __CN68XX_REGS_H__
#include "cn66xx_regs.h"
/*###################### REQUEST QUEUE #########################*/
......
......@@ -19,13 +19,9 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/ethtool.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
......@@ -36,9 +32,6 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
static int octnet_get_link_stats(struct net_device *netdev);
......@@ -106,6 +99,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
"tx_tso",
"tx_tso_packets",
"tx_tso_err",
"tx_vxlan",
"mac_tx_total_pkts",
"mac_tx_total_bytes",
......@@ -129,6 +123,9 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
"rx_err_link",
"rx_err_drop",
"rx_vxlan",
"rx_vxlan_err",
"rx_lro_pkts",
"rx_lro_bytes",
"rx_total_lro",
......@@ -167,6 +164,7 @@ static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
"fw_bytes_sent",
"tso",
"vxlan",
"txq_restart",
};
......@@ -186,6 +184,7 @@ static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
"fw_bytes_received",
"fw_dropped_nodispatch",
"vxlan",
"buffer_alloc_failure",
};
......@@ -340,20 +339,18 @@ static void octnet_mdio_resp_callback(struct octeon_device *oct,
u32 status,
void *buf)
{
struct oct_mdio_cmd_resp *mdio_cmd_rsp;
struct oct_mdio_cmd_context *mdio_cmd_ctx;
struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
oct = lio_get_device(mdio_cmd_ctx->octeon_id);
if (status) {
dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n",
CVM_CAST64(status));
ACCESS_ONCE(mdio_cmd_ctx->cond) = -1;
WRITE_ONCE(mdio_cmd_ctx->cond, -1);
} else {
ACCESS_ONCE(mdio_cmd_ctx->cond) = 1;
WRITE_ONCE(mdio_cmd_ctx->cond, 1);
}
wake_up_interruptible(&mdio_cmd_ctx->wc);
}
......@@ -384,7 +381,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
ACCESS_ONCE(mdio_cmd_ctx->cond) = 0;
WRITE_ONCE(mdio_cmd_ctx->cond, 0);
mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
mdio_cmd->op = op;
mdio_cmd->mdio_addr = loc;
......@@ -423,7 +420,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
sizeof(struct oct_mdio_cmd) / 8);
if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) {
if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
if (!op)
*value = mdio_cmd_rsp->resp.value1;
} else {
......@@ -467,16 +464,14 @@ static int lio_set_phys_id(struct net_device *netdev,
/* Configure Beacon values */
value = LIO68XX_LED_BEACON_CFGON;
ret =
octnet_mdio45_access(lio, 1,
ret = octnet_mdio45_access(lio, 1,
LIO68XX_LED_BEACON_ADDR,
&value);
if (ret)
return ret;
value = LIO68XX_LED_CTRL_CFGON;
ret =
octnet_mdio45_access(lio, 1,
ret = octnet_mdio45_access(lio, 1,
LIO68XX_LED_CTRL_ADDR,
&value);
if (ret)
......@@ -557,7 +552,7 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
}
if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
ering->rx_pending = 0;
ering->rx_max_pending = 0;
ering->rx_mini_pending = 0;
......@@ -617,7 +612,8 @@ lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
static void
lio_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
struct ethtool_stats *stats __attribute__((unused)),
u64 *data)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
......@@ -675,6 +671,10 @@ lio_get_ethtool_stats(struct net_device *netdev,
*fw_err_tso
*/
data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
*fw_tx_vxlan
*/
data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
/* mac tx statistics */
/*CVMX_BGXX_CMRX_TX_STAT5 */
......@@ -729,6 +729,15 @@ lio_get_ethtool_stats(struct net_device *netdev,
*/
data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
*fromwire.fw_rx_vxlan
*/
data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
*fromwire.fw_rx_vxlan_err
*/
data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
/* LRO */
/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
*fw_lro_pkts
......@@ -822,6 +831,8 @@ lio_get_ethtool_stats(struct net_device *netdev,
/*tso request*/
data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
/*vxlan request*/
data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
/*txq restart*/
data[i++] =
CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
......@@ -858,6 +869,9 @@ lio_get_ethtool_stats(struct net_device *netdev,
CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
}
......@@ -945,7 +959,6 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
intr_coal->rx_max_coalesced_frames =
CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
}
iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
break;
......@@ -1043,7 +1056,7 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
return 0;
}
void
static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
u32 status, void *ptr)
{
......@@ -1083,6 +1096,9 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev,
rstats->fw_err_pko = rsp_rstats->fw_err_pko;
rstats->fw_err_link = rsp_rstats->fw_err_link;
rstats->fw_err_drop = rsp_rstats->fw_err_drop;
rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
/* Number of packets that are LROed */
rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
/* Number of octets that are LROed */
......@@ -1127,6 +1143,8 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev,
tstats->fw_tso = rsp_tstats->fw_tso;
tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
tstats->fw_err_tso = rsp_tstats->fw_err_tso;
tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
resp->status = 1;
} else {
resp->status = -1;
......@@ -1523,7 +1541,7 @@ static int lio_nway_reset(struct net_device *netdev)
}
/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev)
static int lio_get_regs_len(struct net_device *dev __attribute__((unused)))
{
return OCT_ETHTOOL_REGDUMP_LEN;
}
......@@ -1667,13 +1685,12 @@ static void lio_get_regs(struct net_device *dev,
int len = 0;
struct octeon_device *oct = lio->oct_dev;
memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
regs->version = OCT_ETHTOOL_REGSVER;
switch (oct->chip_id) {
/* case OCTEON_CN73XX: Todo */
case OCTEON_CN68XX:
case OCTEON_CN66XX:
memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
len += cn6xxx_read_csr_reg(regbuf + len, oct);
len += cn6xxx_read_config_reg(regbuf + len, oct);
break;
......
......@@ -20,24 +20,12 @@
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/net_tstamp.h>
#include <linux/if_vlan.h>
#include <linux/firmware.h>
#include <linux/ethtool.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include "octeon_config.h"
#include <net/vxlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
......@@ -48,7 +36,6 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
......@@ -251,8 +238,7 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
pkt_cnt += octeon_droq_check_hw_for_pkts(oct,
oct->droq[i]);
pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
}
if (pkt_cnt > 0) {
pending_pkts += pkt_cnt;
......@@ -507,7 +493,8 @@ static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
* \brief mmio handler
* @param pdev Pointer to PCI device
*/
static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
static pci_ers_result_t liquidio_pcie_mmio_enabled(
struct pci_dev *pdev __attribute__((unused)))
{
/* We should never hit this since we never ask for a reset for a Fatal
* Error. We always return DISCONNECT in io_error above.
......@@ -523,7 +510,8 @@ static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
* Restart the card from scratch, as if from a cold-boot. Implementation
* resembles the first-half of the octeon_resume routine.
*/
static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
static pci_ers_result_t liquidio_pcie_slot_reset(
struct pci_dev *pdev __attribute__((unused)))
{
/* We should never hit this since we never ask for a reset for a Fatal
* Error. We always return DISCONNECT in io_error above.
......@@ -540,7 +528,7 @@ static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
* its OK to resume normal operation. Implementation resembles the
* second-half of the octeon_resume routine.
*/
static void liquidio_pcie_resume(struct pci_dev *pdev)
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
/* Nothing to be done here. */
}
......@@ -551,7 +539,8 @@ static void liquidio_pcie_resume(struct pci_dev *pdev)
* @param pdev Pointer to PCI device
* @param state state to suspend to
*/
static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state)
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
pm_message_t state __attribute__((unused)))
{
return 0;
}
......@@ -560,7 +549,7 @@ static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state)
* \brief called when resuming
* @param pdev Pointer to PCI device
*/
static int liquidio_resume(struct pci_dev *pdev)
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
return 0;
}
......@@ -1104,7 +1093,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
* @param pdev PCI device structure
* @param ent unused
*/
static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static int
liquidio_probe(struct pci_dev *pdev,
const struct pci_device_id *ent __attribute__((unused)))
{
struct octeon_device *oct_dev = NULL;
struct handshake *hs;
......@@ -1267,7 +1258,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* Nothing to be done here either */
break;
} /* end switch(oct->status) */
} /* end switch (oct->status) */
tasklet_kill(&oct_priv->droq_tasklet);
}
......@@ -1724,8 +1715,10 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
* @param rq request
* @param on is it on
*/
static int liquidio_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
static int
liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
struct ptp_clock_request *rq __attribute__((unused)),
int on __attribute__((unused)))
{
return -EOPNOTSUPP;
}
......@@ -1866,7 +1859,7 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
* @param buf pointer to resp structure
*/
static void if_cfg_callback(struct octeon_device *oct,
u32 status,
u32 status __attribute__((unused)),
void *buf)
{
struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
......@@ -1880,7 +1873,7 @@ static void if_cfg_callback(struct octeon_device *oct,
if (resp->status)
dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
CVM_CAST64(resp->status));
ACCESS_ONCE(ctx->cond) = 1;
WRITE_ONCE(ctx->cond, 1);
snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
resp->cfg_info.liquidio_firmware_version);
......@@ -1900,7 +1893,8 @@ static void if_cfg_callback(struct octeon_device *oct,
* @returns selected queue number
*/
static u16 select_q(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
void *accel_priv __attribute__((unused)),
select_queue_fallback_t fallback __attribute__((unused)))
{
u32 qindex = 0;
struct lio *lio;
......@@ -1920,7 +1914,7 @@ static u16 select_q(struct net_device *dev, struct sk_buff *skb,
* @param arg - farg registered in droq_ops
*/
static void
liquidio_push_packet(u32 octeon_id,
liquidio_push_packet(u32 octeon_id __attribute__((unused)),
void *skbuff,
u32 len,
union octeon_rh *rh,
......@@ -2000,14 +1994,25 @@ liquidio_push_packet(u32 octeon_id,
}
skb->protocol = eth_type_trans(skb, skb->dev);
if ((netdev->features & NETIF_F_RXCSUM) &&
(rh->r_dh.csum_verified == CNNIC_CSUM_VERIFIED))
(((rh->r_dh.encap_on) &&
(rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
(!(rh->r_dh.encap_on) &&
(rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
/* checksum has already been verified */
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
/* Setting Encapsulation field on basis of status received
* from the firmware
*/
if (rh->r_dh.encap_on) {
skb->encapsulation = 1;
skb->csum_level = 1;
droq->stats.rx_vxlan++;
}
/* inbound VLAN tag */
if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(rh->r_dh.vlan != 0)) {
......@@ -2120,7 +2125,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
/**
* \brief Setup input and output queues
* @param octeon_dev octeon device
* @param net_device Net device
* @param ifidx Interface Index
*
* Note: Queues are with respect to the octeon device. Thus
* an input queue is for egress packets, and output queues
......@@ -2331,7 +2336,6 @@ static int liquidio_stop(struct net_device *netdev)
}
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
module_put(THIS_MODULE);
return 0;
}
......@@ -2342,6 +2346,7 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
struct net_device *netdev = (struct net_device *)nctrl->netpndev;
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
u8 *mac;
switch (nctrl->ncmd.s.cmd) {
case OCTNET_CMD_CHANGE_DEVFLAGS:
......@@ -2349,22 +2354,24 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
break;
case OCTNET_CMD_CHANGE_MACADDR:
/* If command is successful, change the MACADDR. */
netif_info(lio, probe, lio->netdev, " MACAddr changed to 0x%llx\n",
CVM_CAST64(nctrl->udd[0]));
dev_info(&oct->pci_dev->dev, "%s MACAddr changed to 0x%llx\n",
netdev->name, CVM_CAST64(nctrl->udd[0]));
memcpy(netdev->dev_addr, ((u8 *)&nctrl->udd[0]) + 2, ETH_ALEN);
mac = ((u8 *)&nctrl->udd[0]) + 2;
netif_info(lio, probe, lio->netdev,
"%s %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
"MACAddr changed to", mac[0], mac[1],
mac[2], mac[3], mac[4], mac[5]);
break;
case OCTNET_CMD_CHANGE_MTU:
/* If command is successful, change the MTU. */
netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
netdev->mtu, nctrl->ncmd.s.param2);
netdev->mtu, nctrl->ncmd.s.param1);
dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
netdev->name, netdev->mtu,
nctrl->ncmd.s.param2);
netdev->mtu = nctrl->ncmd.s.param2;
nctrl->ncmd.s.param1);
rtnl_lock();
netdev->mtu = nctrl->ncmd.s.param1;
call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev);
rtnl_unlock();
break;
case OCTNET_CMD_GPIO_ACCESS:
......@@ -2410,6 +2417,55 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
netdev->name);
break;
/* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
* Command passed by NIC driver
*/
case OCTNET_CMD_TNL_RX_CSUM_CTL:
if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
netif_info(lio, probe, lio->netdev,
"%s RX Checksum Offload Enabled\n",
netdev->name);
} else if (nctrl->ncmd.s.param1 ==
OCTNET_CMD_RXCSUM_DISABLE) {
netif_info(lio, probe, lio->netdev,
"%s RX Checksum Offload Disabled\n",
netdev->name);
}
break;
/* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
* Command passed by NIC driver
*/
case OCTNET_CMD_TNL_TX_CSUM_CTL:
if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
netif_info(lio, probe, lio->netdev,
"%s TX Checksum Offload Enabled\n",
netdev->name);
} else if (nctrl->ncmd.s.param1 ==
OCTNET_CMD_TXCSUM_DISABLE) {
netif_info(lio, probe, lio->netdev,
"%s TX Checksum Offload Disabled\n",
netdev->name);
}
break;
/* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
* Command passed by NIC driver
*/
case OCTNET_CMD_VXLAN_PORT_CONFIG:
if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
netif_info(lio, probe, lio->netdev,
"%s VxLAN Destination UDP PORT:%d ADDED\n",
netdev->name,
nctrl->ncmd.s.param1);
} else if (nctrl->ncmd.s.more ==
OCTNET_CMD_VXLAN_PORT_DEL) {
netif_info(lio, probe, lio->netdev,
"%s VxLAN Destination UDP PORT:%d DELETED\n",
netdev->name,
nctrl->ncmd.s.param1);
}
break;
case OCTNET_CMD_SET_FLOW_CTL:
netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
......@@ -2465,7 +2521,7 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
struct octnic_ctrl_pkt nctrl;
struct netdev_hw_addr *ha;
u64 *mc;
int ret, i;
int ret;
int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
......@@ -2481,7 +2537,6 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
/* copy all the addresses into the udd */
i = 0;
mc = &nctrl.udd[0];
netdev_for_each_mc_addr(ha, netdev) {
*mc = 0;
......@@ -2604,18 +2659,16 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE;
int ret = 0;
/* Limit the MTU to make sure the ethernet packets are between 64 bytes
* and 65535 bytes
/* Limit the MTU to make sure the ethernet packets are between 68 bytes
* and 16000 bytes
*/
if ((max_frm_size < OCTNET_MIN_FRM_SIZE) ||
(max_frm_size > OCTNET_MAX_FRM_SIZE)) {
if ((new_mtu < LIO_MIN_MTU_SIZE) ||
(new_mtu > LIO_MAX_MTU_SIZE)) {
dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
(OCTNET_MIN_FRM_SIZE - OCTNET_FRM_HEADER_SIZE),
(OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE));
LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
return -EINVAL;
}
......@@ -2646,7 +2699,7 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
* @param ifr interface request
* @param cmd command
*/
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config conf;
struct lio *lio = GET_LIO(netdev);
......@@ -2707,7 +2760,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCSHWTSTAMP:
return hwtstamp_ioctl(netdev, ifr, cmd);
return hwtstamp_ioctl(netdev, ifr);
default:
return -EOPNOTSUPP;
}
......@@ -2886,12 +2939,12 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
/* defer sending if queue is full */
stats->tx_iq_busy++;
netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
ndata.q_no);
lio->txq);
return NETDEV_TX_BUSY;
}
}
/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
* lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no );
* lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
*/
ndata.datasize = skb->len;
......@@ -2899,9 +2952,14 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
cmdsetup.u64 = 0;
cmdsetup.s.iq_no = iq_no;
if (skb->ip_summed == CHECKSUM_PARTIAL)
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (skb->encapsulation) {
cmdsetup.s.tnl_csum = 1;
stats->tx_vxlan++;
} else {
cmdsetup.s.transport_csum = 1;
}
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
cmdsetup.s.timestamp = 1;
......@@ -2910,6 +2968,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
if (skb_shinfo(skb)->nr_frags == 0) {
cmdsetup.s.u.datasize = skb->len;
octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
/* Offload checksum calculation for TCP/UDP packets */
dptr = dma_map_single(&oct->pci_dev->dev,
skb->data,
......@@ -3124,6 +3183,72 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
return ret;
}
/** Sending command to enable/disable RX checksum offload
* @param netdev pointer to network device
* @param command OCTNET_CMD_TNL_RX_CSUM_CTL
* @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/
* OCTNET_CMD_RXCSUM_DISABLE
* @returns SUCCESS or FAILURE
*/
int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
u8 rx_cmd)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
int ret = 0;
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = command;
nctrl.ncmd.s.param1 = rx_cmd;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev,
"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
ret);
}
return ret;
}
/** Sending command to add/delete VxLAN UDP port to firmware
* @param netdev pointer to network device
* @param command OCTNET_CMD_VXLAN_PORT_CONFIG
* @param vxlan_port VxLAN port to be added or deleted
* @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
* OCTNET_CMD_VXLAN_PORT_DEL
* @returns SUCCESS or FAILURE
*/
static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
u16 vxlan_port, u8 vxlan_cmd_bit)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
int ret = 0;
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = command;
nctrl.ncmd.s.more = vxlan_cmd_bit;
nctrl.ncmd.s.param1 = vxlan_port;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev,
"VxLAN port add/delete failed in core (ret:0x%x)\n",
ret);
}
return ret;
}
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
struct lio *lio = GET_LIO(netdev);
......@@ -3204,9 +3329,48 @@ static int liquidio_set_features(struct net_device *netdev,
liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
/* Sending command to firmware to enable/disable RX checksum
* offload settings using ethtool
*/
if (!(netdev->features & NETIF_F_RXCSUM) &&
(lio->enc_dev_capability & NETIF_F_RXCSUM) &&
(features & NETIF_F_RXCSUM))
liquidio_set_rxcsum_command(netdev,
OCTNET_CMD_TNL_RX_CSUM_CTL,
OCTNET_CMD_RXCSUM_ENABLE);
else if ((netdev->features & NETIF_F_RXCSUM) &&
(lio->enc_dev_capability & NETIF_F_RXCSUM) &&
!(features & NETIF_F_RXCSUM))
liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
OCTNET_CMD_RXCSUM_DISABLE);
return 0;
}
static void liquidio_add_vxlan_port(struct net_device *netdev,
struct udp_tunnel_info *ti)
{
if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
liquidio_vxlan_port_command(netdev,
OCTNET_CMD_VXLAN_PORT_CONFIG,
htons(ti->port),
OCTNET_CMD_VXLAN_PORT_ADD);
}
static void liquidio_del_vxlan_port(struct net_device *netdev,
struct udp_tunnel_info *ti)
{
if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
liquidio_vxlan_port_command(netdev,
OCTNET_CMD_VXLAN_PORT_CONFIG,
htons(ti->port),
OCTNET_CMD_VXLAN_PORT_DEL);
}
static struct net_device_ops lionetdevops = {
.ndo_open = liquidio_open,
.ndo_stop = liquidio_stop,
......@@ -3222,6 +3386,8 @@ static struct net_device_ops lionetdevops = {
.ndo_do_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
.ndo_udp_tunnel_add = liquidio_add_vxlan_port,
.ndo_udp_tunnel_del = liquidio_del_vxlan_port,
};
/** \brief Entry point for the liquidio module
......@@ -3323,7 +3489,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
struct liquidio_if_cfg_resp *resp;
struct octdev_props *props;
int retval, num_iqueues, num_oqueues;
int num_cpus = num_online_cpus();
union oct_nic_if_cfg if_cfg;
unsigned int base_queue;
unsigned int gmx_port_id;
......@@ -3365,14 +3530,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
gmx_port_id =
CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
ifidx_or_pfnum = i;
if (num_iqueues > num_cpus)
num_iqueues = num_cpus;
if (num_oqueues > num_cpus)
num_oqueues = num_cpus;
dev_dbg(&octeon_dev->pci_dev->dev,
"requesting config for interface %d, iqs %d, oqs %d\n",
ifidx_or_pfnum, num_iqueues, num_oqueues);
ACCESS_ONCE(ctx->cond) = 0;
WRITE_ONCE(ctx->cond, 0);
ctx->octeon_id = lio_get_device_id(octeon_dev);
init_waitqueue_head(&ctx->wc);
......@@ -3390,7 +3552,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
sc->callback = if_cfg_callback;
sc->callback_arg = sc;
sc->wait_time = 1000;
sc->wait_time = 3000;
retval = octeon_send_soft_command(octeon_dev, sc);
if (retval == IQ_SEND_FAILED) {
......@@ -3479,6 +3641,22 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
| NETIF_F_LRO;
netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
/* Copy of transmit encapsulation capabilities:
* TSO, TSO6, Checksums for this device
*/
lio->enc_dev_capability = NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
| NETIF_F_GSO_UDP_TUNNEL
| NETIF_F_HW_CSUM | NETIF_F_SG
| NETIF_F_RXCSUM
| NETIF_F_TSO | NETIF_F_TSO6
| NETIF_F_LRO;
netdev->hw_enc_features = (lio->enc_dev_capability &
~NETIF_F_LRO);
lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
netdev->vlan_features = lio->dev_capability;
/* Add any unchangeable hw features */
lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
......@@ -3561,6 +3739,15 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
ifstate_set(lio, LIO_IFSTATE_REGISTERED);
/* Sending command to firmware to enable Rx checksum offload
* by default at the time of setup of Liquidio driver for
* this device
*/
liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
OCTNET_CMD_RXCSUM_ENABLE);
liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
OCTNET_CMD_TXCSUM_ENABLE);
dev_dbg(&octeon_dev->pci_dev->dev,
"NIC ifidx:%d Setup successful\n", i);
......@@ -3771,6 +3958,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
/* Release any previously allocated queues */
for (j = 0; j < octeon_dev->num_oqs; j++)
octeon_delete_droq(octeon_dev, j);
return 1;
}
atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
......@@ -3793,7 +3981,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
/* Setup the interrupt handler and record the INT SUM register address
*/
octeon_setup_interrupt(octeon_dev);
if (octeon_setup_interrupt(octeon_dev))
return 1;
/* Enable Octeon device interrupts */
octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);
......
......@@ -34,6 +34,7 @@
#define LIQUIDIO_MICRO_VERSION ".1"
#define LIQUIDIO_PACKAGE ""
#define LIQUIDIO_VERSION "1.4.1"
#define CONTROL_IQ 0
/** Tag types used by Octeon cores in its work. */
enum octeon_tag_type {
......@@ -216,6 +217,13 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_ENABLE_VLAN_FILTER 0x16
#define OCTNET_CMD_ADD_VLAN_FILTER 0x17
#define OCTNET_CMD_DEL_VLAN_FILTER 0x18
#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19
#define OCTNET_CMD_VXLAN_PORT_ADD 0x0
#define OCTNET_CMD_VXLAN_PORT_DEL 0x1
#define OCTNET_CMD_RXCSUM_ENABLE 0x0
#define OCTNET_CMD_RXCSUM_DISABLE 0x1
#define OCTNET_CMD_TXCSUM_ENABLE 0x0
#define OCTNET_CMD_TXCSUM_DISABLE 0x1
/* RX(packets coming from wire) Checksum verification flags */
/* TCP/UDP csum */
......@@ -288,7 +296,7 @@ union octnet_cmd {
#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd))
/* Instruction Header (DPI - CN23xx) - for OCTEON-III models */
/* Instruction Header(DPI) - for OCTEON-III models */
struct octeon_instr_ih3 {
#ifdef __BIG_ENDIAN_BITFIELD
......@@ -338,7 +346,7 @@ struct octeon_instr_ih3 {
#endif
};
/* Optional PKI Instruction Header(PKI IH) - for OCTEON CN23XX models */
/* Optional PKI Instruction Header(PKI IH) - for OCTEON-III models */
/** BIG ENDIAN format. */
struct octeon_instr_pki_ih3 {
#ifdef __BIG_ENDIAN_BITFIELD
......@@ -533,6 +541,8 @@ union octeon_rh {
u64 priority:3;
u64 csum_verified:3; /** checksum verified. */
u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */
u64 encap_on:1;
u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */
} r_dh;
struct {
u64 opcode:4;
......@@ -542,7 +552,8 @@ union octeon_rh {
u64 num_gmx_ports:8;
u64 max_nic_ports:10;
u64 app_cap_flags:4;
u64 app_mode:16;
u64 app_mode:8;
u64 pkind:8;
} r_core_drv_init;
struct {
u64 opcode:4;
......@@ -562,6 +573,8 @@ union octeon_rh {
u64 opcode:4;
} r;
struct {
u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */
u64 encap_on:1;
u64 has_hwtstamp:1; /** 1 = has hwtstamp */
u64 csum_verified:3; /** checksum verified. */
u64 priority:3;
......@@ -572,7 +585,8 @@ union octeon_rh {
u64 opcode:4;
} r_dh;
struct {
u64 app_mode:16;
u64 pkind:8;
u64 app_mode:8;
u64 app_cap_flags:4;
u64 max_nic_ports:10;
u64 num_gmx_ports:8;
......@@ -630,9 +644,11 @@ union oct_link_status {
u64 autoneg:1;
u64 if_mode:5;
u64 pause:1;
u64 reserved:16;
u64 flashing:1;
u64 reserved:15;
#else
u64 reserved:16;
u64 reserved:15;
u64 flashing:1;
u64 pause:1;
u64 if_mode:5;
u64 autoneg:1;
......@@ -736,6 +752,8 @@ struct nic_rx_stats {
u64 fw_err_pko;
u64 fw_err_link;
u64 fw_err_drop;
u64 fw_rx_vxlan;
u64 fw_rx_vxlan_err;
/* LRO */
u64 fw_lro_pkts; /* Number of packets that are LROed */
......@@ -776,6 +794,7 @@ struct nic_tx_stats {
u64 fw_err_tso;
u64 fw_tso; /* number of tso requests */
u64 fw_tso_fwd; /* number of packets segmented in tso */
u64 fw_tx_vxlan;
};
struct oct_link_stats {
......@@ -856,9 +875,9 @@ union oct_nic_if_cfg {
u64 num_iqueues:16;
u64 num_oqueues:16;
u64 gmx_port_id:8;
u64 reserved:8;
u64 vf_id:8;
#else
u64 reserved:8;
u64 vf_id:8;
u64 gmx_port_id:8;
u64 num_oqueues:16;
u64 num_iqueues:16;
......
......@@ -226,7 +226,7 @@ struct octeon_oq_config {
*/
u64 refill_threshold:16;
/** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
/** If set, the Output queue uses info-pointer mode. (Default: 1) */
u64 info_ptr:32;
/* Max number of OQs available */
......@@ -236,7 +236,7 @@ struct octeon_oq_config {
/* Max number of OQs available */
u64 max_oqs:8;
/** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
/** If set, the Output queue uses info-pointer mode. (Default: 1) */
u64 info_ptr:32;
/** The number of buffers that were consumed during packet processing by
......
......@@ -23,27 +23,14 @@
/**
* @file octeon_console.c
*/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
static void octeon_remote_lock(void);
......@@ -51,6 +38,8 @@ static void octeon_remote_unlock(void);
static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
const char *name,
u32 flags);
static int octeon_console_read(struct octeon_device *oct, u32 console_num,
char *buffer, u32 buf_size);
#define MIN(a, b) min((a), (b))
#define CAST_ULL(v) ((u64)(v))
......@@ -170,8 +159,8 @@ struct octeon_pci_console_desc {
offsetof(struct cvmx_bootmem_desc, field), \
SIZEOF_FIELD(struct cvmx_bootmem_desc, field))
#define __cvmx_bootmem_lock(flags)
#define __cvmx_bootmem_unlock(flags)
#define __cvmx_bootmem_lock(flags) (flags = flags)
#define __cvmx_bootmem_unlock(flags) (flags = flags)
/**
* This macro returns a member of the
......@@ -234,7 +223,7 @@ static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct,
u32 len)
{
addr += offsetof(struct cvmx_bootmem_named_block_desc, name);
octeon_pci_read_core_mem(oct, addr, str, len);
octeon_pci_read_core_mem(oct, addr, (u8 *)str, len);
str[len] = 0;
}
......@@ -323,6 +312,9 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
if (name && named_size) {
char *name_tmp =
kmalloc(name_length + 1, GFP_KERNEL);
if (!name_tmp)
break;
CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr,
name_tmp,
name_length);
......@@ -383,7 +375,7 @@ static void octeon_remote_unlock(void)
int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
u32 wait_hundredths)
{
u32 len = strlen(cmd_str);
u32 len = (u32)strlen(cmd_str);
dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str);
......@@ -440,8 +432,7 @@ int octeon_wait_for_bootloader(struct octeon_device *oct,
}
static void octeon_console_handle_result(struct octeon_device *oct,
size_t console_num,
char *buffer, s32 bytes_read)
size_t console_num)
{
struct octeon_console *console;
......@@ -492,7 +483,7 @@ static void check_console(struct work_struct *work)
struct octeon_console *console;
struct cavium_wk *wk = (struct cavium_wk *)work;
struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
size_t console_num = wk->ctxul;
u32 console_num = (u32)wk->ctxul;
u32 delay;
console = &oct->console[console_num];
......@@ -505,20 +496,17 @@ static void check_console(struct work_struct *work)
*/
bytes_read =
octeon_console_read(oct, console_num, console_buffer,
sizeof(console_buffer) - 1, 0);
sizeof(console_buffer) - 1);
if (bytes_read > 0) {
total_read += bytes_read;
if (console->waiting) {
octeon_console_handle_result(oct, console_num,
console_buffer,
bytes_read);
}
if (console->waiting)
octeon_console_handle_result(oct, console_num);
if (octeon_console_debug_enabled(console_num)) {
output_console_line(oct, console, console_num,
console_buffer, bytes_read);
}
} else if (bytes_read < 0) {
dev_err(&oct->pci_dev->dev, "Error reading console %lu, ret=%d\n",
dev_err(&oct->pci_dev->dev, "Error reading console %u, ret=%d\n",
console_num, bytes_read);
}
......@@ -530,7 +518,7 @@ static void check_console(struct work_struct *work)
*/
if (octeon_console_debug_enabled(console_num) &&
(total_read == 0) && (console->leftover[0])) {
dev_info(&oct->pci_dev->dev, "%lu: %s\n",
dev_info(&oct->pci_dev->dev, "%u: %s\n",
console_num, console->leftover);
console->leftover[0] = '\0';
}
......@@ -675,8 +663,8 @@ static inline int octeon_console_avail_bytes(u32 buffer_size,
octeon_console_free_bytes(buffer_size, wr_idx, rd_idx);
}
int octeon_console_read(struct octeon_device *oct, u32 console_num,
char *buffer, u32 buf_size, u32 flags)
static int octeon_console_read(struct octeon_device *oct, u32 console_num,
char *buffer, u32 buf_size)
{
int bytes_to_read;
u32 rd_idx, wr_idx;
......@@ -712,7 +700,7 @@ int octeon_console_read(struct octeon_device *oct, u32 console_num,
bytes_to_read = console->buffer_size - rd_idx;
octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx,
buffer, bytes_to_read);
(u8 *)buffer, bytes_to_read);
octeon_write_device_mem32(oct, console->addr +
offsetof(struct octeon_pci_console,
output_read_index),
......
......@@ -19,27 +19,19 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/crc32.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
......@@ -652,15 +644,15 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
void octeon_free_device_mem(struct octeon_device *oct)
{
u32 i;
int i;
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
/* could check mask as well */
if (oct->io_qmask.oq & (1ULL << i))
vfree(oct->droq[i]);
}
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
/* could check mask as well */
if (oct->io_qmask.iq & (1ULL << i))
vfree(oct->instr_queue[i]);
}
......@@ -752,13 +744,11 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
/* this function is only for setting up the first queue */
int octeon_setup_instr_queues(struct octeon_device *oct)
{
u32 num_iqs = 0;
u32 num_descs = 0;
u32 iq_no = 0;
union oct_txpciq txpciq;
int numa_node = cpu_to_node(iq_no % num_online_cpus());
num_iqs = 1;
/* this causes queue 0 to be default queue */
if (OCTEON_CN6XXX(oct))
num_descs =
......@@ -793,13 +783,11 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
int octeon_setup_output_queues(struct octeon_device *oct)
{
u32 num_oqs = 0;
u32 num_descs = 0;
u32 desc_size = 0;
u32 oq_no = 0;
int numa_node = cpu_to_node(oq_no % num_online_cpus());
num_oqs = 1;
/* this causes queue 0 to be default queue */
if (OCTEON_CN6XXX(oct)) {
num_descs =
......@@ -1019,79 +1007,6 @@ octeon_register_dispatch_fn(struct octeon_device *oct,
return 0;
}
/* octeon_unregister_dispatch_fn
* Parameters:
* oct - octeon device
* opcode - driver should unregister the function for this opcode
* subcode - driver should unregister the function for this subcode
* Description:
* Unregister the function set for this opcode+subcode.
* Returns:
* Success: 0
* Failure: 1
* Locks:
* No locks are held.
*/
int
octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode,
u16 subcode)
{
int retval = 0;
u32 idx;
struct list_head *dispatch, *dfree = NULL, *tmp2;
u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
idx = combined_opcode & OCTEON_OPCODE_MASK;
spin_lock_bh(&oct->dispatch.lock);
if (oct->dispatch.count == 0) {
spin_unlock_bh(&oct->dispatch.lock);
dev_err(&oct->pci_dev->dev,
"No dispatch functions registered for this device\n");
return 1;
}
if (oct->dispatch.dlist[idx].opcode == combined_opcode) {
dispatch = &oct->dispatch.dlist[idx].list;
if (dispatch->next != dispatch) {
dispatch = dispatch->next;
oct->dispatch.dlist[idx].opcode =
((struct octeon_dispatch *)dispatch)->opcode;
oct->dispatch.dlist[idx].dispatch_fn =
((struct octeon_dispatch *)
dispatch)->dispatch_fn;
oct->dispatch.dlist[idx].arg =
((struct octeon_dispatch *)dispatch)->arg;
list_del(dispatch);
dfree = dispatch;
} else {
oct->dispatch.dlist[idx].opcode = 0;
oct->dispatch.dlist[idx].dispatch_fn = NULL;
oct->dispatch.dlist[idx].arg = NULL;
}
} else {
retval = 1;
list_for_each_safe(dispatch, tmp2,
&(oct->dispatch.dlist[idx].
list)) {
if (((struct octeon_dispatch *)dispatch)->opcode ==
combined_opcode) {
list_del(dispatch);
dfree = dispatch;
retval = 0;
}
}
}
if (!retval)
oct->dispatch.count--;
spin_unlock_bh(&oct->dispatch.lock);
vfree(dfree);
return retval;
}
int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
{
u32 i;
......
......@@ -221,7 +221,7 @@ struct octeon_fn_list {
/* Structure for named memory blocks
* Number of descriptors
* available can be changed without affecting compatiblity,
* available can be changed without affecting compatibility,
* but name length changes require a bump in the bootmem
* descriptor version
* Note: This structure must be naturally 64 bit aligned, as a single
......@@ -254,7 +254,7 @@ struct oct_fw_info {
struct cavium_wk {
struct delayed_work work;
void *ctxptr;
size_t ctxul;
u64 ctxul;
};
struct cavium_wq {
......@@ -585,8 +585,7 @@ int octeon_add_console(struct octeon_device *oct, u32 console_num);
int octeon_console_write(struct octeon_device *oct, u32 console_num,
char *buffer, u32 write_request_size, u32 flags);
int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
int octeon_console_read(struct octeon_device *oct, u32 console_num,
char *buffer, u32 buf_size, u32 flags);
int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
/** Removes all attached consoles. */
......
......@@ -19,30 +19,18 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
/* #define CAVIUM_ONLY_PERF_MODE */
#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))
......@@ -104,8 +92,12 @@ static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
return fn_arg;
}
u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
struct octeon_droq *droq)
/** Check for packets on Droq. This function should be called with
* lock held.
* @param droq - Droq on which count is checked.
* @return Returns packet count.
*/
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
u32 pkt_count = 0;
......@@ -196,7 +188,6 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct,
droq->recv_buf_list[i].buffer = buf;
droq->recv_buf_list[i].data = get_rbd(buf);
droq->info_list[i].length = 0;
/* map ring buffers into memory */
......@@ -569,7 +560,9 @@ octeon_droq_dispatch_pkt(struct octeon_device *oct,
droq->stats.dropped_nomem++;
}
} else {
dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function\n");
dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
(unsigned int)rh->r.opcode,
(unsigned int)rh->r.subcode);
droq->stats.dropped_nodispatch++;
} /* else (dispatch_fn ... */
......@@ -654,6 +647,7 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
pg_info->page = NULL;
droq->recv_buf_list[droq->read_idx].buffer =
NULL;
INCR_INDEX_BY1(droq->read_idx, droq->max_count);
droq->refill_count++;
} else {
......@@ -748,7 +742,7 @@ octeon_droq_process_packets(struct octeon_device *oct,
if (pkt_count > budget)
pkt_count = budget;
/* Grab the lock */
/* Grab the droq lock */
spin_lock(&droq->lock);
pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
......@@ -810,7 +804,7 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
total_pkts_processed += pkts_processed;
octeon_droq_check_hw_for_pkts(oct, droq);
octeon_droq_check_hw_for_pkts(droq);
}
spin_unlock(&droq->lock);
......@@ -834,18 +828,6 @@ octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
u32 arg)
{
struct octeon_droq *droq;
struct octeon_config *oct_cfg = NULL;
oct_cfg = octeon_get_conf(oct);
if (!oct_cfg)
return -EINVAL;
if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
__func__, q_no, (oct->num_oqs - 1));
return -EINVAL;
}
droq = oct->droq[q_no];
......
......@@ -121,6 +121,9 @@ struct oct_droq_stats {
/** Num of Packets dropped due to receive path failures. */
u64 rx_dropped;
/** Num of vxlan packets received; */
u64 rx_vxlan;
/** Num of failures of recv_buffer_alloc() */
u64 rx_alloc_failure;
......@@ -413,24 +416,9 @@ int octeon_register_dispatch_fn(struct octeon_device *oct,
u16 subcode,
octeon_dispatch_fn_t fn, void *fn_arg);
/** Remove registration for an opcode/subcode. This will delete the mapping for
* an opcode/subcode. The dispatch function will be unregistered and will no
* longer be called if a packet with the opcode/subcode arrives in the driver
* output queues.
* @param oct - the octeon device to unregister from.
* @param opcode - the opcode to be unregistered.
* @param subcode - the subcode to be unregistered.
*
* @return Success: 0; Failure: 1
*/
int octeon_unregister_dispatch_fn(struct octeon_device *oct,
u16 opcode,
u16 subcode);
void octeon_droq_print_stats(void);
u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
struct octeon_droq *droq);
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq);
int octeon_create_droq(struct octeon_device *oct, u32 q_no,
u32 num_descs, u32 desc_size, void *app_ctx);
......
......@@ -66,6 +66,7 @@ struct oct_iq_stats {
u64 tx_dropped;/**< Num of pkts dropped due to xmit path errors. */
u64 tx_tot_bytes;/**< Total count of bytes sent to network. */
u64 tx_gso; /* count of tso */
u64 tx_vxlan; /* tunnel */
u64 tx_dmamap_fail;
u64 tx_restart;
/*u64 tx_timeout_count;*/
......@@ -98,7 +99,7 @@ struct octeon_instr_queue {
u32 rsvd:17;
/* Controls the periodic flushing of iq */
/* Controls whether extra flushing of IQ is done on Tx */
u32 do_auto_flush:1;
u32 status:8;
......
......@@ -174,7 +174,7 @@ sleep_cond(wait_queue_head_t *wait_queue, int *condition)
init_waitqueue_entry(&we, current);
add_wait_queue(wait_queue, &we);
while (!(ACCESS_ONCE(*condition))) {
while (!(READ_ONCE(*condition))) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current))
goto out;
......
......@@ -19,43 +19,29 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
#define MEMOPS_IDX MAX_BAR1_MAP_INDEX
#ifdef __BIG_ENDIAN_BITFIELD
static inline void
octeon_toggle_bar1_swapmode(struct octeon_device *oct __attribute__((unused)),
u32 idx __attribute__((unused)))
octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
{
#ifdef __BIG_ENDIAN_BITFIELD
u32 mask;
mask = oct->fn_list.bar1_idx_read(oct, idx);
mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
oct->fn_list.bar1_idx_write(oct, idx, mask);
#endif
}
#else
#define octeon_toggle_bar1_swapmode(oct, idx) (oct = oct)
#endif
static void
octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
......
......@@ -30,6 +30,9 @@
#include <linux/dma-mapping.h>
#include <linux/ptp_clock_kernel.h>
#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE 68
struct oct_nic_stats_resp {
u64 rh;
struct oct_link_stats stats;
......@@ -96,6 +99,12 @@ struct lio {
/** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
u64 dev_capability;
/* Copy of transmit encapsulation capabilities:
* TSO, TSO6, Checksums for this device for Kernel
* 3.10.0 onwards
*/
u64 enc_dev_capability;
/** Copy of beacon reg in phy */
u32 phy_beacon_val;
......@@ -115,7 +124,6 @@ struct lio {
/* work queue for txq status */
struct cavium_wq txq_status_wq;
};
#define LIO_SIZE (sizeof(struct lio))
......@@ -351,7 +359,7 @@ lio_map_ring_info(struct octeon_droq *droq, u32 i)
dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
BUG_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
return (u64)dma_addr;
}
......
......@@ -19,14 +19,9 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
......@@ -34,13 +29,6 @@
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
void *
octeon_alloc_soft_command_resp(struct octeon_device *oct,
......
......@@ -19,28 +19,17 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
......@@ -301,40 +290,8 @@ static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
memcpy(iqptr, cmd, cmdsize);
}
static inline int
__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
struct octeon_instr_queue *iq,
u32 force_db __attribute__((unused)), u8 *cmd)
{
u32 index = -1;
/* This ensures that the read index does not wrap around to the same
* position if queue gets full before Octeon could fetch any instr.
*/
if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
return -1;
__copy_cmd_into_iq(iq, cmd);
/* "index" is returned, host_write_index is modified. */
index = iq->host_write_index;
INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
iq->fill_cnt++;
/* Flush the command into memory. We need to be sure the data is in
* memory before indicating that the instruction is pending.
*/
wmb();
atomic_inc(&iq->instr_pending);
return index;
}
static inline struct iq_post_status
__post_command2(struct octeon_device *octeon_dev __attribute__((unused)),
struct octeon_instr_queue *iq,
u32 force_db __attribute__((unused)), u8 *cmd)
__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
struct iq_post_status st;
......@@ -392,6 +349,7 @@ __add_to_request_list(struct octeon_instr_queue *iq,
iq->request_list[idx].reqtype = reqtype;
}
/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
struct octeon_instr_queue *iq, u32 napi_budget)
......@@ -403,6 +361,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
unsigned int pkts_compl = 0, bytes_compl = 0;
struct octeon_soft_command *sc;
struct octeon_instr_irh *irh;
unsigned long flags;
while (old != iq->octeon_read_index) {
reqtype = iq->request_list[old].reqtype;
......@@ -432,17 +391,22 @@ lio_process_iq_request_list(struct octeon_device *oct,
* command response list because we expect
* a response from Octeon.
*/
spin_lock_bh(&oct->response_list
[OCTEON_ORDERED_SC_LIST].lock);
spin_lock_irqsave
(&oct->response_list
[OCTEON_ORDERED_SC_LIST].lock,
flags);
atomic_inc(&oct->response_list
[OCTEON_ORDERED_SC_LIST].
pending_req_count);
list_add_tail(&sc->node, &oct->response_list
[OCTEON_ORDERED_SC_LIST].head);
spin_unlock_bh(&oct->response_list
[OCTEON_ORDERED_SC_LIST].lock);
spin_unlock_irqrestore
(&oct->response_list
[OCTEON_ORDERED_SC_LIST].lock,
flags);
} else {
if (sc->callback) {
/* This callback must not sleep */
sc->callback(oct, OCTEON_REQUEST_DONE,
sc->callback_arg);
}
......@@ -559,11 +523,12 @@ static void check_db_timeout(struct work_struct *work)
{
struct cavium_wk *wk = (struct cavium_wk *)work;
struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
unsigned long iq_no = wk->ctxul;
u64 iq_no = wk->ctxul;
struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
u32 delay = 10;
__check_db_timeout(oct, iq_no);
queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}
int
......@@ -579,7 +544,7 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
*/
spin_lock_bh(&iq->post_lock);
st = __post_command2(oct, iq, force_db, cmd);
st = __post_command2(iq, cmd);
if (st.status != IQ_SEND_FAILED) {
octeon_report_sent_bytes_to_bql(buf, reqtype);
......@@ -587,7 +552,7 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
if (iq->fill_cnt >= iq->fill_threshold || force_db)
if (force_db)
ring_doorbell(oct, iq);
} else {
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
......@@ -618,8 +583,8 @@ octeon_prepare_soft_command(struct octeon_device *oct,
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
BUG_ON(opcode > 15);
BUG_ON(subcode > 127);
WARN_ON(opcode > 15);
WARN_ON(subcode > 127);
oct_cfg = octeon_get_conf(oct);
......@@ -661,7 +626,6 @@ int octeon_send_soft_command(struct octeon_device *oct,
{
struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
u32 len;
ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
......@@ -671,12 +635,10 @@ int octeon_send_soft_command(struct octeon_device *oct,
}
irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
if (irh->rflag) {
BUG_ON(!sc->dmarptr);
BUG_ON(!sc->status_word);
WARN_ON(!sc->dmarptr);
WARN_ON(!sc->status_word);
*sc->status_word = COMPLETION_WORD_INIT;
rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
sc->cmd.cmd2.rptr = sc->dmarptr;
}
len = (u32)ih2->dlengsz;
......@@ -720,7 +682,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
struct list_head *tmp, *tmp2;
struct octeon_soft_command *sc;
spin_lock(&oct->sc_buf_pool.lock);
spin_lock_bh(&oct->sc_buf_pool.lock);
list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
list_del(tmp);
......@@ -732,7 +694,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
INIT_LIST_HEAD(&oct->sc_buf_pool.head);
spin_unlock(&oct->sc_buf_pool.lock);
spin_unlock_bh(&oct->sc_buf_pool.lock);
return 0;
}
......@@ -748,13 +710,13 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc = NULL;
struct list_head *tmp;
BUG_ON((offset + datasize + rdatasize + ctxsize) >
WARN_ON((offset + datasize + rdatasize + ctxsize) >
SOFT_COMMAND_BUFFER_SIZE);
spin_lock(&oct->sc_buf_pool.lock);
spin_lock_bh(&oct->sc_buf_pool.lock);
if (list_empty(&oct->sc_buf_pool.head)) {
spin_unlock(&oct->sc_buf_pool.lock);
spin_unlock_bh(&oct->sc_buf_pool.lock);
return NULL;
}
......@@ -765,7 +727,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
atomic_inc(&oct->sc_buf_pool.alloc_buf_count);
spin_unlock(&oct->sc_buf_pool.lock);
spin_unlock_bh(&oct->sc_buf_pool.lock);
sc = (struct octeon_soft_command *)tmp;
......@@ -795,7 +757,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
offset = (offset + datasize + 127) & 0xffffff80;
if (rdatasize) {
BUG_ON(rdatasize < 16);
WARN_ON(rdatasize < 16);
sc->virtrptr = (u8 *)sc + offset;
sc->dmarptr = dma_addr + offset;
sc->rdatasize = rdatasize;
......@@ -808,11 +770,11 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
void octeon_free_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
{
spin_lock(&oct->sc_buf_pool.lock);
spin_lock_bh(&oct->sc_buf_pool.lock);
list_add_tail(&sc->node, &oct->sc_buf_pool.head);
atomic_dec(&oct->sc_buf_pool.alloc_buf_count);
spin_unlock(&oct->sc_buf_pool.lock);
spin_unlock_bh(&oct->sc_buf_pool.lock);
}
......@@ -19,28 +19,14 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
static void oct_poll_req_completion(struct work_struct *work);
......@@ -66,7 +52,7 @@ int octeon_setup_response_list(struct octeon_device *oct)
INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
cwq->wk.ctxptr = oct;
oct->cmd_resp_state = OCT_DRV_ONLINE;
queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50));
return ret;
}
......@@ -176,6 +162,5 @@ static void oct_poll_req_completion(struct work_struct *work)
struct cavium_wq *cwq = &oct->dma_comp_wq;
lio_process_ordered_list(oct, 0);
queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50));
}