Commit 22f87883 authored by David S. Miller

Merge branch 'liquidio-next'

Raghu Vatsavayi says:

====================
liquidio updates and bug fixes

The following V2 patchset contains updates and bug fixes for
the liquidio driver. It also includes the changes you suggested
in the vxlan code. Please apply the patches in the order given,
as some of them depend on earlier patches in the series.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 13c5c240 9fbc48f6
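
Several hunks in this series convert ACCESS_ONCE() users to READ_ONCE()/WRITE_ONCE() (see the octnet_mdio45_access and sleep_cond changes below). A minimal, illustrative sketch of that pattern follows; the function and variable names here are hypothetical stand-ins, not part of the driver:

    #include <linux/compiler.h>

    /* 'cond' stands in for fields such as mdio_cmd_ctx->cond in the
     * hunks below; the example only shows the accessor conversion.
     */
    static int mdio_wait_example(int *cond)
    {
            /* Old style: ACCESS_ONCE(*cond) = 0;
             * New style: lockless scalar stores go through WRITE_ONCE().
             */
            WRITE_ONCE(*cond, 0);

            /* ... issue the command and wait for completion ... */

            /* Old style: if (ACCESS_ONCE(*cond) == 1)
             * New style: lockless scalar loads go through READ_ONCE().
             */
            return READ_ONCE(*cond) == 1;
    }

READ_ONCE()/WRITE_ONCE() are the preferred accessors for such lockless scalar loads and stores; ACCESS_ONCE() was later removed from the kernel because it does not work reliably for non-scalar types.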
......@@ -19,26 +19,16 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
......@@ -74,9 +64,9 @@ void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
u32 val;
pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
if (val & 0x000f0000) {
if (val & 0x000c0000) {
dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
val & 0x000f0000);
val & 0x000c0000);
}
val |= 0xf; /* Enable Link error reporting */
......@@ -229,7 +219,7 @@ void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
/* / Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);
/* / Select ES,RO,NS setting from register for Output Queue Packet
/* Select ES, RO, NS setting from register for Output Queue Packet
* Address
*/
octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);
......@@ -547,14 +537,14 @@ static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
}
void
static void
lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
{
dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
CVM_CAST64(intr64));
}
int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
{
struct octeon_droq *droq;
int oq_no;
......@@ -579,7 +569,7 @@ int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
continue;
droq = oct->droq[oq_no];
pkt_count = octeon_droq_check_hw_for_pkts(oct, droq);
pkt_count = octeon_droq_check_hw_for_pkts(droq);
if (pkt_count) {
oct->droq_intr |= (1ULL << oq_no);
if (droq->ops.poll_mode) {
......
......@@ -82,8 +82,6 @@ void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
void lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
void lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64);
int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct);
irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev);
void lio_cn6xxx_reinit_regs(struct octeon_device *oct);
void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
......
......@@ -19,28 +19,17 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
{
......@@ -129,7 +118,7 @@ static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct)
pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val);
}
int lio_is_210nv(struct octeon_device *oct)
static int lio_is_210nv(struct octeon_device *oct)
{
u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG);
......
......@@ -28,6 +28,5 @@
#define __CN68XX_DEVICE_H__
int lio_setup_cn68xx_octeon_device(struct octeon_device *oct);
int lio_is_210nv(struct octeon_device *oct);
#endif
......@@ -29,7 +29,6 @@
#ifndef __CN68XX_REGS_H__
#define __CN68XX_REGS_H__
#include "cn66xx_regs.h"
/*###################### REQUEST QUEUE #########################*/
......
......@@ -19,13 +19,9 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/ethtool.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
......@@ -36,9 +32,6 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
static int octnet_get_link_stats(struct net_device *netdev);
......@@ -106,6 +99,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
"tx_tso",
"tx_tso_packets",
"tx_tso_err",
"tx_vxlan",
"mac_tx_total_pkts",
"mac_tx_total_bytes",
......@@ -129,6 +123,9 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
"rx_err_link",
"rx_err_drop",
"rx_vxlan",
"rx_vxlan_err",
"rx_lro_pkts",
"rx_lro_bytes",
"rx_total_lro",
......@@ -167,6 +164,7 @@ static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
"fw_bytes_sent",
"tso",
"vxlan",
"txq_restart",
};
......@@ -186,6 +184,7 @@ static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
"fw_bytes_received",
"fw_dropped_nodispatch",
"vxlan",
"buffer_alloc_failure",
};
......@@ -340,20 +339,18 @@ static void octnet_mdio_resp_callback(struct octeon_device *oct,
u32 status,
void *buf)
{
struct oct_mdio_cmd_resp *mdio_cmd_rsp;
struct oct_mdio_cmd_context *mdio_cmd_ctx;
struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
oct = lio_get_device(mdio_cmd_ctx->octeon_id);
if (status) {
dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n",
CVM_CAST64(status));
ACCESS_ONCE(mdio_cmd_ctx->cond) = -1;
WRITE_ONCE(mdio_cmd_ctx->cond, -1);
} else {
ACCESS_ONCE(mdio_cmd_ctx->cond) = 1;
WRITE_ONCE(mdio_cmd_ctx->cond, 1);
}
wake_up_interruptible(&mdio_cmd_ctx->wc);
}
......@@ -384,7 +381,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
ACCESS_ONCE(mdio_cmd_ctx->cond) = 0;
WRITE_ONCE(mdio_cmd_ctx->cond, 0);
mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
mdio_cmd->op = op;
mdio_cmd->mdio_addr = loc;
......@@ -423,7 +420,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
sizeof(struct oct_mdio_cmd) / 8);
if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) {
if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
if (!op)
*value = mdio_cmd_rsp->resp.value1;
} else {
......@@ -467,18 +464,16 @@ static int lio_set_phys_id(struct net_device *netdev,
/* Configure Beacon values */
value = LIO68XX_LED_BEACON_CFGON;
ret =
octnet_mdio45_access(lio, 1,
LIO68XX_LED_BEACON_ADDR,
&value);
ret = octnet_mdio45_access(lio, 1,
LIO68XX_LED_BEACON_ADDR,
&value);
if (ret)
return ret;
value = LIO68XX_LED_CTRL_CFGON;
ret =
octnet_mdio45_access(lio, 1,
LIO68XX_LED_CTRL_ADDR,
&value);
ret = octnet_mdio45_access(lio, 1,
LIO68XX_LED_CTRL_ADDR,
&value);
if (ret)
return ret;
} else {
......@@ -557,7 +552,7 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
}
if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
ering->rx_pending = 0;
ering->rx_max_pending = 0;
ering->rx_mini_pending = 0;
......@@ -617,7 +612,8 @@ lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
static void
lio_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
struct ethtool_stats *stats __attribute__((unused)),
u64 *data)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
......@@ -675,6 +671,10 @@ lio_get_ethtool_stats(struct net_device *netdev,
*fw_err_tso
*/
data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
*fw_tx_vxlan
*/
data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
/* mac tx statistics */
/*CVMX_BGXX_CMRX_TX_STAT5 */
......@@ -729,6 +729,15 @@ lio_get_ethtool_stats(struct net_device *netdev,
*/
data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
*fromwire.fw_rx_vxlan
*/
data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
*fromwire.fw_rx_vxlan_err
*/
data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
/* LRO */
/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
*fw_lro_pkts
......@@ -822,6 +831,8 @@ lio_get_ethtool_stats(struct net_device *netdev,
/*tso request*/
data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
/*vxlan request*/
data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
/*txq restart*/
data[i++] =
CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
......@@ -858,6 +869,9 @@ lio_get_ethtool_stats(struct net_device *netdev,
CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
}
......@@ -945,7 +959,6 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
intr_coal->rx_max_coalesced_frames =
CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
}
iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
break;
......@@ -1043,7 +1056,7 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
return 0;
}
void
static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
u32 status, void *ptr)
{
......@@ -1083,6 +1096,9 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev,
rstats->fw_err_pko = rsp_rstats->fw_err_pko;
rstats->fw_err_link = rsp_rstats->fw_err_link;
rstats->fw_err_drop = rsp_rstats->fw_err_drop;
rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
/* Number of packets that are LROed */
rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
/* Number of octets that are LROed */
......@@ -1127,6 +1143,8 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev,
tstats->fw_tso = rsp_tstats->fw_tso;
tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
tstats->fw_err_tso = rsp_tstats->fw_err_tso;
tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
resp->status = 1;
} else {
resp->status = -1;
......@@ -1523,7 +1541,7 @@ static int lio_nway_reset(struct net_device *netdev)
}
/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev)
static int lio_get_regs_len(struct net_device *dev __attribute__((unused)))
{
return OCT_ETHTOOL_REGDUMP_LEN;
}
......@@ -1667,13 +1685,12 @@ static void lio_get_regs(struct net_device *dev,
int len = 0;
struct octeon_device *oct = lio->oct_dev;
memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
regs->version = OCT_ETHTOOL_REGSVER;
switch (oct->chip_id) {
/* case OCTEON_CN73XX: Todo */
case OCTEON_CN68XX:
case OCTEON_CN66XX:
memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
len += cn6xxx_read_csr_reg(regbuf + len, oct);
len += cn6xxx_read_config_reg(regbuf + len, oct);
break;
......
......@@ -34,6 +34,7 @@
#define LIQUIDIO_MICRO_VERSION ".1"
#define LIQUIDIO_PACKAGE ""
#define LIQUIDIO_VERSION "1.4.1"
#define CONTROL_IQ 0
/** Tag types used by Octeon cores in its work. */
enum octeon_tag_type {
......@@ -216,6 +217,13 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_ENABLE_VLAN_FILTER 0x16
#define OCTNET_CMD_ADD_VLAN_FILTER 0x17
#define OCTNET_CMD_DEL_VLAN_FILTER 0x18
#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19
#define OCTNET_CMD_VXLAN_PORT_ADD 0x0
#define OCTNET_CMD_VXLAN_PORT_DEL 0x1
#define OCTNET_CMD_RXCSUM_ENABLE 0x0
#define OCTNET_CMD_RXCSUM_DISABLE 0x1
#define OCTNET_CMD_TXCSUM_ENABLE 0x0
#define OCTNET_CMD_TXCSUM_DISABLE 0x1
/* RX(packets coming from wire) Checksum verification flags */
/* TCP/UDP csum */
......@@ -288,7 +296,7 @@ union octnet_cmd {
#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd))
/* Instruction Header (DPI - CN23xx) - for OCTEON-III models */
/* Instruction Header(DPI) - for OCTEON-III models */
struct octeon_instr_ih3 {
#ifdef __BIG_ENDIAN_BITFIELD
......@@ -338,7 +346,7 @@ struct octeon_instr_ih3 {
#endif
};
/* Optional PKI Instruction Header(PKI IH) - for OCTEON CN23XX models */
/* Optional PKI Instruction Header(PKI IH) - for OCTEON-III models */
/** BIG ENDIAN format. */
struct octeon_instr_pki_ih3 {
#ifdef __BIG_ENDIAN_BITFIELD
......@@ -533,6 +541,8 @@ union octeon_rh {
u64 priority:3;
u64 csum_verified:3; /** checksum verified. */
u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */
u64 encap_on:1;
u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */
} r_dh;
struct {
u64 opcode:4;
......@@ -542,7 +552,8 @@ union octeon_rh {
u64 num_gmx_ports:8;
u64 max_nic_ports:10;
u64 app_cap_flags:4;
u64 app_mode:16;
u64 app_mode:8;
u64 pkind:8;
} r_core_drv_init;
struct {
u64 opcode:4;
......@@ -562,6 +573,8 @@ union octeon_rh {
u64 opcode:4;
} r;
struct {
u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */
u64 encap_on:1;
u64 has_hwtstamp:1; /** 1 = has hwtstamp */
u64 csum_verified:3; /** checksum verified. */
u64 priority:3;
......@@ -572,7 +585,8 @@ union octeon_rh {
u64 opcode:4;
} r_dh;
struct {
u64 app_mode:16;
u64 pkind:8;
u64 app_mode:8;
u64 app_cap_flags:4;
u64 max_nic_ports:10;
u64 num_gmx_ports:8;
......@@ -630,9 +644,11 @@ union oct_link_status {
u64 autoneg:1;
u64 if_mode:5;
u64 pause:1;
u64 reserved:16;
u64 flashing:1;
u64 reserved:15;
#else
u64 reserved:16;
u64 reserved:15;
u64 flashing:1;
u64 pause:1;
u64 if_mode:5;
u64 autoneg:1;
......@@ -736,6 +752,8 @@ struct nic_rx_stats {
u64 fw_err_pko;
u64 fw_err_link;
u64 fw_err_drop;
u64 fw_rx_vxlan;
u64 fw_rx_vxlan_err;
/* LRO */
u64 fw_lro_pkts; /* Number of packets that are LROed */
......@@ -776,6 +794,7 @@ struct nic_tx_stats {
u64 fw_err_tso;
u64 fw_tso; /* number of tso requests */
u64 fw_tso_fwd; /* number of packets segmented in tso */
u64 fw_tx_vxlan;
};
struct oct_link_stats {
......@@ -856,9 +875,9 @@ union oct_nic_if_cfg {
u64 num_iqueues:16;
u64 num_oqueues:16;
u64 gmx_port_id:8;
u64 reserved:8;
u64 vf_id:8;
#else
u64 reserved:8;
u64 vf_id:8;
u64 gmx_port_id:8;
u64 num_oqueues:16;
u64 num_iqueues:16;
......
......@@ -226,7 +226,7 @@ struct octeon_oq_config {
*/
u64 refill_threshold:16;
/** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
/** If set, the Output queue uses info-pointer mode. (Default: 1) */
u64 info_ptr:32;
/* Max number of OQs available */
......@@ -236,7 +236,7 @@ struct octeon_oq_config {
/* Max number of OQs available */
u64 max_oqs:8;
/** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
/** If set, the Output queue uses info-pointer mode. (Default: 1) */
u64 info_ptr:32;
/** The number of buffers that were consumed during packet processing by
......
......@@ -23,27 +23,14 @@
/**
* @file octeon_console.c
*/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
static void octeon_remote_lock(void);
......@@ -51,6 +38,8 @@ static void octeon_remote_unlock(void);
static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
const char *name,
u32 flags);
static int octeon_console_read(struct octeon_device *oct, u32 console_num,
char *buffer, u32 buf_size);
#define MIN(a, b) min((a), (b))
#define CAST_ULL(v) ((u64)(v))
......@@ -170,8 +159,8 @@ struct octeon_pci_console_desc {
offsetof(struct cvmx_bootmem_desc, field), \
SIZEOF_FIELD(struct cvmx_bootmem_desc, field))
#define __cvmx_bootmem_lock(flags)
#define __cvmx_bootmem_unlock(flags)
#define __cvmx_bootmem_lock(flags) (flags = flags)
#define __cvmx_bootmem_unlock(flags) (flags = flags)
/**
* This macro returns a member of the
......@@ -234,7 +223,7 @@ static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct,
u32 len)
{
addr += offsetof(struct cvmx_bootmem_named_block_desc, name);
octeon_pci_read_core_mem(oct, addr, str, len);
octeon_pci_read_core_mem(oct, addr, (u8 *)str, len);
str[len] = 0;
}
......@@ -323,6 +312,9 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
if (name && named_size) {
char *name_tmp =
kmalloc(name_length + 1, GFP_KERNEL);
if (!name_tmp)
break;
CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr,
name_tmp,
name_length);
......@@ -383,7 +375,7 @@ static void octeon_remote_unlock(void)
int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
u32 wait_hundredths)
{
u32 len = strlen(cmd_str);
u32 len = (u32)strlen(cmd_str);
dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str);
......@@ -440,8 +432,7 @@ int octeon_wait_for_bootloader(struct octeon_device *oct,
}
static void octeon_console_handle_result(struct octeon_device *oct,
size_t console_num,
char *buffer, s32 bytes_read)
size_t console_num)
{
struct octeon_console *console;
......@@ -492,7 +483,7 @@ static void check_console(struct work_struct *work)
struct octeon_console *console;
struct cavium_wk *wk = (struct cavium_wk *)work;
struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
size_t console_num = wk->ctxul;
u32 console_num = (u32)wk->ctxul;
u32 delay;
console = &oct->console[console_num];
......@@ -505,20 +496,17 @@ static void check_console(struct work_struct *work)
*/
bytes_read =
octeon_console_read(oct, console_num, console_buffer,
sizeof(console_buffer) - 1, 0);
sizeof(console_buffer) - 1);
if (bytes_read > 0) {
total_read += bytes_read;
if (console->waiting) {
octeon_console_handle_result(oct, console_num,
console_buffer,
bytes_read);
}
if (console->waiting)
octeon_console_handle_result(oct, console_num);
if (octeon_console_debug_enabled(console_num)) {
output_console_line(oct, console, console_num,
console_buffer, bytes_read);
}
} else if (bytes_read < 0) {
dev_err(&oct->pci_dev->dev, "Error reading console %lu, ret=%d\n",
dev_err(&oct->pci_dev->dev, "Error reading console %u, ret=%d\n",
console_num, bytes_read);
}
......@@ -530,7 +518,7 @@ static void check_console(struct work_struct *work)
*/
if (octeon_console_debug_enabled(console_num) &&
(total_read == 0) && (console->leftover[0])) {
dev_info(&oct->pci_dev->dev, "%lu: %s\n",
dev_info(&oct->pci_dev->dev, "%u: %s\n",
console_num, console->leftover);
console->leftover[0] = '\0';
}
......@@ -675,8 +663,8 @@ static inline int octeon_console_avail_bytes(u32 buffer_size,
octeon_console_free_bytes(buffer_size, wr_idx, rd_idx);
}
int octeon_console_read(struct octeon_device *oct, u32 console_num,
char *buffer, u32 buf_size, u32 flags)
static int octeon_console_read(struct octeon_device *oct, u32 console_num,
char *buffer, u32 buf_size)
{
int bytes_to_read;
u32 rd_idx, wr_idx;
......@@ -712,7 +700,7 @@ int octeon_console_read(struct octeon_device *oct, u32 console_num,
bytes_to_read = console->buffer_size - rd_idx;
octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx,
buffer, bytes_to_read);
(u8 *)buffer, bytes_to_read);
octeon_write_device_mem32(oct, console->addr +
offsetof(struct octeon_pci_console,
output_read_index),
......
......@@ -19,27 +19,19 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/crc32.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
......@@ -448,10 +440,10 @@ static struct octeon_config_ptr {
};
static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
"BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
"BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
"IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
"DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
"HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
"HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
"INVALID"
};
......@@ -652,16 +644,16 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
void octeon_free_device_mem(struct octeon_device *oct)
{
u32 i;
int i;
for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
/* could check mask as well */
vfree(oct->droq[i]);
if (oct->io_qmask.oq & (1ULL << i))
vfree(oct->droq[i]);
}
for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
/* could check mask as well */
vfree(oct->instr_queue[i]);
if (oct->io_qmask.iq & (1ULL << i))
vfree(oct->instr_queue[i]);
}
i = oct->octeon_id;
......@@ -752,13 +744,11 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
/* this function is only for setting up the first queue */
int octeon_setup_instr_queues(struct octeon_device *oct)
{
u32 num_iqs = 0;
u32 num_descs = 0;
u32 iq_no = 0;
union oct_txpciq txpciq;
int numa_node = cpu_to_node(iq_no % num_online_cpus());
num_iqs = 1;
/* this causes queue 0 to be default queue */
if (OCTEON_CN6XXX(oct))
num_descs =
......@@ -793,13 +783,11 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
int octeon_setup_output_queues(struct octeon_device *oct)
{
u32 num_oqs = 0;
u32 num_descs = 0;
u32 desc_size = 0;
u32 oq_no = 0;
int numa_node = cpu_to_node(oq_no % num_online_cpus());
num_oqs = 1;
/* this causes queue 0 to be default queue */
if (OCTEON_CN6XXX(oct)) {
num_descs =
......@@ -1019,79 +1007,6 @@ octeon_register_dispatch_fn(struct octeon_device *oct,
return 0;
}
/* octeon_unregister_dispatch_fn
* Parameters:
* oct - octeon device
* opcode - driver should unregister the function for this opcode
* subcode - driver should unregister the function for this subcode
* Description:
* Unregister the function set for this opcode+subcode.
* Returns:
* Success: 0
* Failure: 1
* Locks:
* No locks are held.
*/
int
octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode,
u16 subcode)
{
int retval = 0;
u32 idx;
struct list_head *dispatch, *dfree = NULL, *tmp2;
u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
idx = combined_opcode & OCTEON_OPCODE_MASK;
spin_lock_bh(&oct->dispatch.lock);
if (oct->dispatch.count == 0) {
spin_unlock_bh(&oct->dispatch.lock);
dev_err(&oct->pci_dev->dev,
"No dispatch functions registered for this device\n");
return 1;
}
if (oct->dispatch.dlist[idx].opcode == combined_opcode) {
dispatch = &oct->dispatch.dlist[idx].list;
if (dispatch->next != dispatch) {
dispatch = dispatch->next;
oct->dispatch.dlist[idx].opcode =
((struct octeon_dispatch *)dispatch)->opcode;
oct->dispatch.dlist[idx].dispatch_fn =
((struct octeon_dispatch *)
dispatch)->dispatch_fn;
oct->dispatch.dlist[idx].arg =
((struct octeon_dispatch *)dispatch)->arg;
list_del(dispatch);
dfree = dispatch;
} else {
oct->dispatch.dlist[idx].opcode = 0;
oct->dispatch.dlist[idx].dispatch_fn = NULL;
oct->dispatch.dlist[idx].arg = NULL;
}
} else {
retval = 1;
list_for_each_safe(dispatch, tmp2,
&(oct->dispatch.dlist[idx].
list)) {
if (((struct octeon_dispatch *)dispatch)->opcode ==
combined_opcode) {
list_del(dispatch);
dfree = dispatch;
retval = 0;
}
}
}
if (!retval)
oct->dispatch.count--;
spin_unlock_bh(&oct->dispatch.lock);
vfree(dfree);
return retval;
}
int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
{
u32 i;
......
......@@ -221,7 +221,7 @@ struct octeon_fn_list {
/* Structure for named memory blocks
* Number of descriptors
* available can be changed without affecting compatiblity,
* available can be changed without affecting compatibility,
* but name length changes require a bump in the bootmem
* descriptor version
* Note: This structure must be naturally 64 bit aligned, as a single
......@@ -254,7 +254,7 @@ struct oct_fw_info {
struct cavium_wk {
struct delayed_work work;
void *ctxptr;
size_t ctxul;
u64 ctxul;
};
struct cavium_wq {
......@@ -585,8 +585,7 @@ int octeon_add_console(struct octeon_device *oct, u32 console_num);
int octeon_console_write(struct octeon_device *oct, u32 console_num,
char *buffer, u32 write_request_size, u32 flags);
int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
int octeon_console_read(struct octeon_device *oct, u32 console_num,
char *buffer, u32 buf_size, u32 flags);
int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
/** Removes all attached consoles. */
......
......@@ -19,30 +19,18 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
/* #define CAVIUM_ONLY_PERF_MODE */
#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))
......@@ -104,8 +92,12 @@ static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
return fn_arg;
}
u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
struct octeon_droq *droq)
/** Check for packets on Droq. This function should be called with
* lock held.
* @param droq - Droq on which count is checked.
* @return Returns packet count.
*/
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
u32 pkt_count = 0;
......@@ -196,7 +188,6 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct,
droq->recv_buf_list[i].buffer = buf;
droq->recv_buf_list[i].data = get_rbd(buf);
droq->info_list[i].length = 0;
/* map ring buffers into memory */
......@@ -569,7 +560,9 @@ octeon_droq_dispatch_pkt(struct octeon_device *oct,
droq->stats.dropped_nomem++;
}
} else {
dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function\n");
dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
(unsigned int)rh->r.opcode,
(unsigned int)rh->r.subcode);
droq->stats.dropped_nodispatch++;
} /* else (dispatch_fn ... */
......@@ -654,6 +647,7 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
pg_info->page = NULL;
droq->recv_buf_list[droq->read_idx].buffer =
NULL;
INCR_INDEX_BY1(droq->read_idx, droq->max_count);
droq->refill_count++;
} else {
......@@ -748,7 +742,7 @@ octeon_droq_process_packets(struct octeon_device *oct,
if (pkt_count > budget)
pkt_count = budget;
/* Grab the lock */
/* Grab the droq lock */
spin_lock(&droq->lock);
pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
......@@ -810,7 +804,7 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
total_pkts_processed += pkts_processed;
octeon_droq_check_hw_for_pkts(oct, droq);
octeon_droq_check_hw_for_pkts(droq);
}
spin_unlock(&droq->lock);
......@@ -834,18 +828,6 @@ octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
u32 arg)
{
struct octeon_droq *droq;
struct octeon_config *oct_cfg = NULL;
oct_cfg = octeon_get_conf(oct);
if (!oct_cfg)
return -EINVAL;
if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
__func__, q_no, (oct->num_oqs - 1));
return -EINVAL;
}
droq = oct->droq[q_no];
......
......@@ -121,6 +121,9 @@ struct oct_droq_stats {
/** Num of Packets dropped due to receive path failures. */
u64 rx_dropped;
/** Num of vxlan packets received; */
u64 rx_vxlan;
/** Num of failures of recv_buffer_alloc() */
u64 rx_alloc_failure;
......@@ -413,24 +416,9 @@ int octeon_register_dispatch_fn(struct octeon_device *oct,
u16 subcode,
octeon_dispatch_fn_t fn, void *fn_arg);
/** Remove registration for an opcode/subcode. This will delete the mapping for
* an opcode/subcode. The dispatch function will be unregistered and will no
* longer be called if a packet with the opcode/subcode arrives in the driver
* output queues.
* @param oct - the octeon device to unregister from.
* @param opcode - the opcode to be unregistered.
* @param subcode - the subcode to be unregistered.
*
* @return Success: 0; Failure: 1
*/
int octeon_unregister_dispatch_fn(struct octeon_device *oct,
u16 opcode,
u16 subcode);
void octeon_droq_print_stats(void);
u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
struct octeon_droq *droq);
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq);
int octeon_create_droq(struct octeon_device *oct, u32 q_no,
u32 num_descs, u32 desc_size, void *app_ctx);
......
......@@ -66,6 +66,7 @@ struct oct_iq_stats {
u64 tx_dropped;/**< Numof pkts dropped dueto xmitpath errors. */
u64 tx_tot_bytes;/**< Total count of bytes sento to network. */
u64 tx_gso; /* count of tso */
u64 tx_vxlan; /* tunnel */
u64 tx_dmamap_fail;
u64 tx_restart;
/*u64 tx_timeout_count;*/
......@@ -98,7 +99,7 @@ struct octeon_instr_queue {
u32 rsvd:17;
/* Controls the periodic flushing of iq */
/* Controls whether extra flushing of IQ is done on Tx */
u32 do_auto_flush:1;
u32 status:8;
......
......@@ -174,7 +174,7 @@ sleep_cond(wait_queue_head_t *wait_queue, int *condition)
init_waitqueue_entry(&we, current);
add_wait_queue(wait_queue, &we);
while (!(ACCESS_ONCE(*condition))) {
while (!(READ_ONCE(*condition))) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current))
goto out;
......
......@@ -19,43 +19,29 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
#define MEMOPS_IDX MAX_BAR1_MAP_INDEX
#ifdef __BIG_ENDIAN_BITFIELD
static inline void
octeon_toggle_bar1_swapmode(struct octeon_device *oct __attribute__((unused)),
u32 idx __attribute__((unused)))
octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
{
#ifdef __BIG_ENDIAN_BITFIELD
u32 mask;
mask = oct->fn_list.bar1_idx_read(oct, idx);
mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
oct->fn_list.bar1_idx_write(oct, idx, mask);
#endif
}
#else
#define octeon_toggle_bar1_swapmode(oct, idx) (oct = oct)
#endif
static void
octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
......
......@@ -30,6 +30,9 @@
#include <linux/dma-mapping.h>
#include <linux/ptp_clock_kernel.h>
#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE 68
struct oct_nic_stats_resp {
u64 rh;
struct oct_link_stats stats;
......@@ -96,6 +99,12 @@ struct lio {
/** Copy of Interface capabilities: TSO, TSO6, LRO, Chescksums . */
u64 dev_capability;
/* Copy of transmit encapsulation capabilities:
* TSO, TSO6, Checksums for this device for Kernel
* 3.10.0 onwards
*/
u64 enc_dev_capability;
/** Copy of beacaon reg in phy */
u32 phy_beacon_val;
......@@ -115,7 +124,6 @@ struct lio {
/* work queue for txq status */
struct cavium_wq txq_status_wq;
};
#define LIO_SIZE (sizeof(struct lio))
......@@ -351,7 +359,7 @@ lio_map_ring_info(struct octeon_droq *droq, u32 i)
dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
BUG_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
return (u64)dma_addr;
}
......
......@@ -19,14 +19,9 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
......@@ -34,13 +29,6 @@
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
void *
octeon_alloc_soft_command_resp(struct octeon_device *oct,
......
......@@ -19,28 +19,17 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
......@@ -301,40 +290,8 @@ static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
memcpy(iqptr, cmd, cmdsize);
}
static inline int
__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
struct octeon_instr_queue *iq,
u32 force_db __attribute__((unused)), u8 *cmd)
{
u32 index = -1;
/* This ensures that the read index does not wrap around to the same
* position if queue gets full before Octeon could fetch any instr.
*/
if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
return -1;
__copy_cmd_into_iq(iq, cmd);
/* "index" is returned, host_write_index is modified. */
index = iq->host_write_index;
INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
iq->fill_cnt++;
/* Flush the command into memory. We need to be sure the data is in
* memory before indicating that the instruction is pending.
*/
wmb();
atomic_inc(&iq->instr_pending);
return index;
}
static inline struct iq_post_status
__post_command2(struct octeon_device *octeon_dev __attribute__((unused)),
struct octeon_instr_queue *iq,
u32 force_db __attribute__((unused)), u8 *cmd)
__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
struct iq_post_status st;
......@@ -392,6 +349,7 @@ __add_to_request_list(struct octeon_instr_queue *iq,
iq->request_list[idx].reqtype = reqtype;
}
/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
struct octeon_instr_queue *iq, u32 napi_budget)
......@@ -403,6 +361,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
unsigned int pkts_compl = 0, bytes_compl = 0;
struct octeon_soft_command *sc;
struct octeon_instr_irh *irh;
unsigned long flags;
while (old != iq->octeon_read_index) {
reqtype = iq->request_list[old].reqtype;
......@@ -432,17 +391,22 @@ lio_process_iq_request_list(struct octeon_device *oct,
* command response list because we expect
* a response from Octeon.
*/
spin_lock_bh(&oct->response_list
[OCTEON_ORDERED_SC_LIST].lock);
spin_lock_irqsave
(&oct->response_list
[OCTEON_ORDERED_SC_LIST].lock,
flags);
atomic_inc(&oct->response_list
[OCTEON_ORDERED_SC_LIST].
pending_req_count);
list_add_tail(&sc->node, &oct->response_list
[OCTEON_ORDERED_SC_LIST].head);
spin_unlock_bh(&oct->response_list
[OCTEON_ORDERED_SC_LIST].lock);
spin_unlock_irqrestore
(&oct->response_list
[OCTEON_ORDERED_SC_LIST].lock,
flags);
} else {
if (sc->callback) {
/* This callback must not sleep */
sc->callback(oct, OCTEON_REQUEST_DONE,
sc->callback_arg);
}
......@@ -559,11 +523,12 @@ static void check_db_timeout(struct work_struct *work)
{
struct cavium_wk *wk = (struct cavium_wk *)work;
struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
unsigned long iq_no = wk->ctxul;
u64 iq_no = wk->ctxul;
struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
u32 delay = 10;
__check_db_timeout(oct, iq_no);
queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}
int
......@@ -579,7 +544,7 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
*/
spin_lock_bh(&iq->post_lock);
st = __post_command2(oct, iq, force_db, cmd);
st = __post_command2(iq, cmd);
if (st.status != IQ_SEND_FAILED) {
octeon_report_sent_bytes_to_bql(buf, reqtype);
......@@ -587,7 +552,7 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
if (iq->fill_cnt >= iq->fill_threshold || force_db)
if (force_db)
ring_doorbell(oct, iq);
} else {
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
......@@ -618,8 +583,8 @@ octeon_prepare_soft_command(struct octeon_device *oct,
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
BUG_ON(opcode > 15);
BUG_ON(subcode > 127);
WARN_ON(opcode > 15);
WARN_ON(subcode > 127);
oct_cfg = octeon_get_conf(oct);
......@@ -661,7 +626,6 @@ int octeon_send_soft_command(struct octeon_device *oct,
{
struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
u32 len;
ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
......@@ -671,12 +635,10 @@ int octeon_send_soft_command(struct octeon_device *oct,
}
irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
if (irh->rflag) {
BUG_ON(!sc->dmarptr);
BUG_ON(!sc->status_word);
WARN_ON(!sc->dmarptr);
WARN_ON(!sc->status_word);
*sc->status_word = COMPLETION_WORD_INIT;
rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
sc->cmd.cmd2.rptr = sc->dmarptr;
}
len = (u32)ih2->dlengsz;
......@@ -720,7 +682,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
struct list_head *tmp, *tmp2;
struct octeon_soft_command *sc;
spin_lock(&oct->sc_buf_pool.lock);
spin_lock_bh(&oct->sc_buf_pool.lock);
list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
list_del(tmp);
......@@ -732,7 +694,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
INIT_LIST_HEAD(&oct->sc_buf_pool.head);
spin_unlock(&oct->sc_buf_pool.lock);
spin_unlock_bh(&oct->sc_buf_pool.lock);
return 0;
}
......@@ -748,13 +710,13 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc = NULL;
struct list_head *tmp;
BUG_ON((offset + datasize + rdatasize + ctxsize) >
WARN_ON((offset + datasize + rdatasize + ctxsize) >
SOFT_COMMAND_BUFFER_SIZE);
spin_lock(&oct->sc_buf_pool.lock);
spin_lock_bh(&oct->sc_buf_pool.lock);
if (list_empty(&oct->sc_buf_pool.head)) {
spin_unlock(&oct->sc_buf_pool.lock);
spin_unlock_bh(&oct->sc_buf_pool.lock);
return NULL;
}
......@@ -765,7 +727,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
atomic_inc(&oct->sc_buf_pool.alloc_buf_count);
spin_unlock(&oct->sc_buf_pool.lock);
spin_unlock_bh(&oct->sc_buf_pool.lock);
sc = (struct octeon_soft_command *)tmp;
......@@ -795,7 +757,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
offset = (offset + datasize + 127) & 0xffffff80;
if (rdatasize) {
BUG_ON(rdatasize < 16);
WARN_ON(rdatasize < 16);
sc->virtrptr = (u8 *)sc + offset;
sc->dmarptr = dma_addr + offset;
sc->rdatasize = rdatasize;
......@@ -808,11 +770,11 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
void octeon_free_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
{
spin_lock(&oct->sc_buf_pool.lock);
spin_lock_bh(&oct->sc_buf_pool.lock);
list_add_tail(&sc->node, &oct->sc_buf_pool.head);
atomic_dec(&oct->sc_buf_pool.alloc_buf_count);
spin_unlock(&oct->sc_buf_pool.lock);
spin_unlock_bh(&oct->sc_buf_pool.lock);
}
......@@ -19,28 +19,14 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
static void oct_poll_req_completion(struct work_struct *work);
......@@ -66,7 +52,7 @@ int octeon_setup_response_list(struct octeon_device *oct)
INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
cwq->wk.ctxptr = oct;
oct->cmd_resp_state = OCT_DRV_ONLINE;
queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50));
return ret;
}
......@@ -176,6 +162,5 @@ static void oct_poll_req_completion(struct work_struct *work)
struct cavium_wq *cwq = &oct->dma_comp_wq;
lio_process_ordered_list(oct, 0);
queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50));
}