Commit 763185a3 authored by Raghu Vatsavayi, committed by David S. Miller

liquidio CN23XX: code cleanup

Cleaned up unnecessary comments and added some minor macros.
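
The mechanical part of this cleanup replaces open-coded (1ULL << n) shifts with
the kernel's BIT_ULL() macro when setting and testing bits in the io_qmask and
droq_intr bitmasks. As a minimal standalone sketch of the equivalence (the
macro definition matches the kernel's bitops headers; the qmask variable below
is a hypothetical stand-in for the driver's io_qmask fields):

	#include <stdio.h>

	/* BIT_ULL(nr) as defined in the kernel's bitops headers */
	#define BIT_ULL(nr) (1ULL << (nr))

	int main(void)
	{
		unsigned long long qmask = 0;
		int i;

		/* Mark queues active the way the driver populates io_qmask. */
		qmask |= BIT_ULL(0);
		qmask |= BIT_ULL(40);	/* the ULL literal keeps this safe past bit 31 */

		for (i = 0; i < 64; i++)
			if (qmask & BIT_ULL(i))
				printf("queue %d active\n", i);

		return 0;
	}

Besides reading more clearly, the named macro avoids the classic mistake of an
int-width shift (1 << n) overflowing once a queue index exceeds 31.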
Signed-off-by: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
Signed-off-by: Derek Chickles <derek.chickles@caviumnetworks.com>
Signed-off-by: Satanand Burla <satananda.burla@caviumnetworks.com>
Signed-off-by: Felix Manlunas <felix.manlunas@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 515e752d
@@ -275,7 +275,6 @@ void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
 {
 	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
 
-	/* Disable Packet-by-Packet mode; No Parse Mode or Skip length */
 	octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);
 
 	/* Write the start of the input queue's ring and its size */
@@ -378,7 +377,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
 
 	/* Reset the doorbell register for each Input queue. */
 	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
-		if (!(oct->io_qmask.iq & (1ULL << i)))
+		if (!(oct->io_qmask.iq & BIT_ULL(i)))
 			continue;
 		octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
 		d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
@@ -400,9 +399,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
 		;
 
 	/* Reset the doorbell register for each Output queue. */
-	/* for (i = 0; i < oct->num_oqs; i++) { */
 	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
-		if (!(oct->io_qmask.oq & (1ULL << i)))
+		if (!(oct->io_qmask.oq & BIT_ULL(i)))
 			continue;
 		octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
 		d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));
@@ -537,15 +535,14 @@ static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
 	oct->droq_intr = 0;
 
-	/* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */
 	for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
-		if (!(droq_mask & (1ULL << oq_no)))
+		if (!(droq_mask & BIT_ULL(oq_no)))
 			continue;
 
 		droq = oct->droq[oq_no];
 		pkt_count = octeon_droq_check_hw_for_pkts(droq);
 		if (pkt_count) {
-			oct->droq_intr |= (1ULL << oq_no);
+			oct->droq_intr |= BIT_ULL(oq_no);
 			if (droq->ops.poll_mode) {
 				u32 value;
 				u32 reg;
@@ -721,8 +718,6 @@ int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
 int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
 				    struct octeon_config *conf6xxx)
 {
-	/* int total_instrs = 0; */
-
 	if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
 		dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
 			__func__, CFG_GET_IQ_MAX_Q(conf6xxx),
...
@@ -96,8 +96,8 @@ void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
 				  struct octeon_reg_list *reg_list);
 u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct);
 u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
-int lio_setup_cn66xx_octeon_device(struct octeon_device *);
+int lio_setup_cn66xx_octeon_device(struct octeon_device *oct);
 int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
-				    struct octeon_config *);
+				    struct octeon_config *conf6xxx);
 
 #endif
@@ -757,9 +757,6 @@ lio_get_ethtool_stats(struct net_device *netdev,
 	/*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
 	data[i++] = CVM_CAST64(netstats->tx_dropped);
 
-	/*data[i++] = CVM_CAST64(stats->multicast); */
-	/*data[i++] = CVM_CAST64(stats->collisions); */
-
 	/* firmware tx stats */
 	/*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
 	 *fromhost.fw_total_sent
@@ -910,9 +907,8 @@ lio_get_ethtool_stats(struct net_device *netdev,
 	/*lio->link_changes*/
 	data[i++] = CVM_CAST64(lio->link_changes);
 
-	/* TX -- lio_update_stats(lio); */
 	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
-		if (!(oct_dev->io_qmask.iq & (1ULL << j)))
+		if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
 			continue;
 		/*packets to network port*/
 		/*# of packets tx to network */
@@ -954,9 +950,8 @@ lio_get_ethtool_stats(struct net_device *netdev,
 	}
 
 	/* RX */
-	/* for (j = 0; j < oct_dev->num_oqs; j++) { */
 	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
-		if (!(oct_dev->io_qmask.oq & (1ULL << j)))
+		if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
 			continue;
 
 		/*packets send to TCP/IP network stack */
@@ -1030,7 +1025,7 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 
 		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
-			if (!(oct_dev->io_qmask.iq & (1ULL << i)))
+			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
 				continue;
 			for (j = 0; j < num_iq_stats; j++) {
 				sprintf(data, "tx-%d-%s", i,
@@ -1040,9 +1035,8 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 		}
 
 		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
-		/* for (i = 0; i < oct_dev->num_oqs; i++) { */
 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
-			if (!(oct_dev->io_qmask.oq & (1ULL << i)))
+			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
 				continue;
 			for (j = 0; j < num_oq_stats; j++) {
 				sprintf(data, "rx-%d-%s", i,
...
@@ -19,10 +19,8 @@
  * This file may also be available under a different license from Cavium.
  * Contact Cavium, Inc. for more information
  **********************************************************************/
-#include <linux/version.h>
 #include <linux/pci.h>
 #include <linux/firmware.h>
-#include <linux/ptp_clock_kernel.h>
 #include <net/vxlan.h>
 #include <linux/kthread.h>
 #include "liquidio_common.h"
@@ -201,9 +199,8 @@ static void octeon_droq_bh(unsigned long pdev)
 	struct octeon_device_priv *oct_priv =
 		(struct octeon_device_priv *)oct->priv;
 
-	/* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
 	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
-		if (!(oct->io_qmask.oq & (1ULL << q_no)))
+		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
 			continue;
 		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
 							  MAX_PACKET_BUDGET);
@@ -238,7 +235,7 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
 		pending_pkts = 0;
 
 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
-			if (!(oct->io_qmask.oq & (1ULL << i)))
+			if (!(oct->io_qmask.oq & BIT_ULL(i)))
 				continue;
 			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
 		}
@@ -320,7 +317,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
 	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
 		struct octeon_instr_queue *iq;
 
-		if (!(oct->io_qmask.iq & (1ULL << i)))
+		if (!(oct->io_qmask.iq & BIT_ULL(i)))
 			continue;
 		iq = oct->instr_queue[i];
@@ -386,7 +383,6 @@ static void stop_pci_io(struct octeon_device *oct)
 	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
 		lio_get_state_string(&oct->status));
 
-	/* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */
 	/* making it a common function for all OCTEON models */
 	cleanup_aer_uncorrect_error_status(oct->pci_dev);
 }
@@ -941,7 +937,6 @@ static inline void update_link_status(struct net_device *netdev,
 
 	if (lio->linfo.link.s.link_up) {
 		netif_carrier_on(netdev);
-		/* start_txq(netdev); */
 		txqs_wake(netdev);
 	} else {
 		netif_carrier_off(netdev);
@@ -1019,7 +1014,7 @@ static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
 
 	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
 		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
 		     oq_no++) {
-			if (!(oct->droq_intr & (1ULL << oq_no)))
+			if (!(oct->droq_intr & BIT_ULL(oq_no)))
 				continue;
 			droq = oct->droq[oq_no];
@@ -1468,7 +1463,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 		/* fallthrough */
 	case OCT_DEV_IN_RESET:
 	case OCT_DEV_DROQ_INIT_DONE:
-		/*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
+		/* Wait for any pending operations */
 		mdelay(100);
 		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
 			if (!(oct->io_qmask.oq & BIT_ULL(i)))
@@ -2461,7 +2456,6 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
 		 * Return back if tx_done is false.
 		 */
 		update_txq_status(oct, iq_no);
-		/*tx_done = (iq->flush_index == iq->octeon_read_index);*/
 	} else {
 		dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
 			__func__, iq_no);
...
@@ -68,8 +68,6 @@ enum octeon_tag_type {
  */
 #define OPCODE_CORE 0           /* used for generic core operations */
 #define OPCODE_NIC  1           /* used for NIC operations */
-#define OPCODE_LAST OPCODE_NIC
-
 /* Subcodes are used by host driver/apps to identify the sub-operation
  * for the core. They only need to by unique for a given subsystem.
  */
...
@@ -649,12 +649,12 @@ void octeon_free_device_mem(struct octeon_device *oct)
 {
 	int i;
 
 	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
-		if (oct->io_qmask.oq & (1ULL << i))
+		if (oct->io_qmask.oq & BIT_ULL(i))
 			vfree(oct->droq[i]);
 	}
 
 	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
-		if (oct->io_qmask.iq & (1ULL << i))
+		if (oct->io_qmask.iq & BIT_ULL(i))
 			vfree(oct->instr_queue[i]);
 	}
@@ -1148,7 +1148,7 @@ int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
 {
 	if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) &&
-	    (oct->io_qmask.iq & (1ULL << q_no)))
+	    (oct->io_qmask.iq & BIT_ULL(q_no)))
 		return oct->instr_queue[q_no]->max_count;
 
 	return -1;
@@ -1157,7 +1157,7 @@ int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
 int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
 {
 	if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) &&
-	    (oct->io_qmask.oq & (1ULL << q_no)))
+	    (oct->io_qmask.oq & BIT_ULL(q_no)))
 		return oct->droq[q_no]->max_count;
 	return -1;
 }
...
@@ -337,7 +337,7 @@ int octeon_init_droq(struct octeon_device *oct,
 	/* For 56xx Pass1, this function won't be called, so no checks. */
 	oct->fn_list.setup_oq_regs(oct, q_no);
 
-	oct->io_qmask.oq |= (1ULL << q_no);
+	oct->io_qmask.oq |= BIT_ULL(q_no);
 
 	return 0;
...
@@ -121,7 +121,6 @@ struct oct_droq_stats {
 	/** Num of Packets dropped due to receive path failures. */
 	u64 rx_dropped;
 
-	/** Num of vxlan packets received; */
 	u64 rx_vxlan;
 
 	/** Num of failures of recv_buffer_alloc() */
...
@@ -69,7 +69,6 @@ struct oct_iq_stats {
 	u64 tx_vxlan; /* tunnel */
 	u64 tx_dmamap_fail;
 	u64 tx_restart;
-	/*u64 tx_timeout_count;*/
 };
 
 #define OCT_IQ_STATS_SIZE   (sizeof(struct oct_iq_stats))
...
@@ -207,24 +207,6 @@ sleep_cond(wait_queue_head_t *wait_queue, int *condition)
 	return errno;
 }
 
-static inline void
-sleep_atomic_cond(wait_queue_head_t *waitq, atomic_t *pcond)
-{
-	wait_queue_t we;
-
-	init_waitqueue_entry(&we, current);
-	add_wait_queue(waitq, &we);
-	while (!atomic_read(pcond)) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (signal_pending(current))
-			goto out;
-		schedule();
-	}
-out:
-	set_current_state(TASK_RUNNING);
-	remove_wait_queue(waitq, &we);
-}
-
 /* Gives up the CPU for a timeout period.
  * Check that the condition is not true before we go to sleep for a
  * timeout period.
...
@@ -145,7 +145,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 	spin_lock_init(&iq->iq_flush_running_lock);
 
-	oct->io_qmask.iq |= (1ULL << iq_no);
+	oct->io_qmask.iq |= BIT_ULL(iq_no);
 
 	/* Set the 32B/64B mode for each input queue */
 	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
@@ -252,9 +252,8 @@ int lio_wait_for_instr_fetch(struct octeon_device *oct)
 	do {
 		instr_cnt = 0;
 
-		/*for (i = 0; i < oct->num_iqs; i++) {*/
 		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
-			if (!(oct->io_qmask.iq & (1ULL << i)))
+			if (!(oct->io_qmask.iq & BIT_ULL(i)))
 				continue;
 			pending =
 				atomic_read(&oct->
@@ -579,8 +578,6 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
 	/* This is only done here to expedite packets being flushed
 	 * for cases where there are no IQ completion interrupts.
 	 */
-	/*if (iq->do_auto_flush)*/
-	/*	octeon_flush_iq(oct, iq, 2, 0);*/
 
 	return st.status;
 }
...
@@ -81,11 +81,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
 	spin_lock_bh(&ordered_sc_list->lock);
 
 	if (ordered_sc_list->head.next == &ordered_sc_list->head) {
-		/* ordered_sc_list is empty; there is
-		 * nothing to process
-		 */
-		spin_unlock_bh
-			(&ordered_sc_list->lock);
+		spin_unlock_bh(&ordered_sc_list->lock);
 		return 1;
 	}
...
@@ -85,7 +85,6 @@ enum {
 /** A value of 0x00000000 indicates no error i.e. success */
 #define DRIVER_ERROR_NONE                0x00000000
 
-/** (Major number: 0x0000; Minor Number: 0x0001) */
 #define DRIVER_ERROR_REQ_PENDING         0x00000001
 #define DRIVER_ERROR_REQ_TIMEOUT         0x00000003
 #define DRIVER_ERROR_REQ_EINTR           0x00000004
...