Commit 4930fbf4 authored by Pavan Kumar Linga, committed by Tony Nguyen

idpf: add core init and interrupt request

Now that the mailbox is set up, add the necessary send and receive
mailbox message framework to support the virtchnl communication
between the driver and the device Control Plane (CP).

Add the core initialization. To start with, the driver confirms the
virtchnl version with the CP. Once that is done, it requests and
receives the required capabilities and resources, such as the maximum
number of vectors and queues.
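
For illustration, the version handshake can be sketched with the
mailbox helpers added in this patch (a minimal sketch, assuming
virtchnl2.h defines struct virtchnl2_version_info and
VIRTCHNL2_OP_VERSION; error paths trimmed):

static int example_ver_check(struct idpf_adapter *adapter)
{
	struct virtchnl2_version_info vvi = {
		.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR),
		.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR),
	};
	int err;

	/* Post the supported version to the CP over the mailbox */
	err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_VERSION,
			       sizeof(vvi), (u8 *)&vvi);
	if (err)
		return err;

	/* Wait for the reply and record what the CP agreed to */
	err = idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_VERSION,
			       &vvi, sizeof(vvi));
	if (err)
		return err;

	adapter->virt_ver_maj = le32_to_cpu(vvi.major);
	adapter->virt_ver_min = le32_to_cpu(vvi.minor);

	return 0;
}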

Based on the vector information received in 'VIRTCHNL2_OP_GET_CAPS',
request the stack to allocate the required vectors. Finally, add the
interrupt handling mechanism for the mailbox queue and enable the
interrupt.

Note: Checkpatch warns that IDPF_FOREACH_VPORT_VC_STATE and
IDPF_GEN_STRING are complex macros that should be enclosed in
parentheses, but that is not the case here: they are never used as
statements and are only used to define the enum and the array (see the
expansion sketch below).
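
For example, running IDPF_FOREACH_VPORT_VC_STATE through the two
generators expands to an enum and a string table that stay in sync by
construction:

enum idpf_vport_vc_state {
	IDPF_VC_ALLOC_VECTORS,
	IDPF_VC_ALLOC_VECTORS_ERR,
	IDPF_VC_DEALLOC_VECTORS,
	IDPF_VC_DEALLOC_VECTORS_ERR,
	IDPF_VC_NBITS,
};

const char * const idpf_vport_vc_state_str[] = {
	"IDPF_VC_ALLOC_VECTORS",
	"IDPF_VC_ALLOC_VECTORS_ERR",
	"IDPF_VC_DEALLOC_VECTORS",
	"IDPF_VC_DEALLOC_VECTORS_ERR",
	"IDPF_VC_NBITS",
};
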
Co-developed-by: Alan Brady <alan.brady@intel.com>
Signed-off-by: Alan Brady <alan.brady@intel.com>
Co-developed-by: Emil Tantilov <emil.s.tantilov@intel.com>
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Co-developed-by: Joshua Hay <joshua.a.hay@intel.com>
Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
Co-developed-by: Madhu Chittim <madhu.chittim@intel.com>
Signed-off-by: Madhu Chittim <madhu.chittim@intel.com>
Co-developed-by: Phani Burra <phani.r.burra@intel.com>
Signed-off-by: Phani Burra <phani.r.burra@intel.com>
Co-developed-by: Shailendra Bhatnagar <shailendra.bhatnagar@intel.com>
Signed-off-by: Shailendra Bhatnagar <shailendra.bhatnagar@intel.com>
Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Pavan Kumar Linga <pavan.kumar.linga@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 8077c727
@@ -11,23 +11,40 @@ struct idpf_adapter;
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "virtchnl2.h"
#include "idpf_txrx.h"
#include "idpf_controlq.h"
/* Default Mailbox settings */
#define IDPF_NUM_DFLT_MBX_Q 2 /* includes both TX and RX */
#define IDPF_DFLT_MBX_Q_LEN 64
#define IDPF_DFLT_MBX_ID -1
/* maximum number of times to try before resetting mailbox */
#define IDPF_MB_MAX_ERR 20
#define IDPF_WAIT_FOR_EVENT_TIMEO_MIN 2000
#define IDPF_WAIT_FOR_EVENT_TIMEO 60000
#define IDPF_MAX_WAIT 500
/* available message levels */
#define IDPF_AVAIL_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
#define IDPF_VIRTCHNL_VERSION_MAJOR VIRTCHNL2_VERSION_MAJOR_2
#define IDPF_VIRTCHNL_VERSION_MINOR VIRTCHNL2_VERSION_MINOR_0
/**
* enum idpf_state - State machine to handle bring up
* @__IDPF_STARTUP: Start the state machine
* @__IDPF_VER_CHECK: Negotiate virtchnl version
* @__IDPF_GET_CAPS: Negotiate capabilities
* @__IDPF_INIT_SW: Init based on given capabilities
* @__IDPF_STATE_LAST: Must be last, used to determine size
*/
enum idpf_state {
__IDPF_STARTUP,
__IDPF_VER_CHECK,
__IDPF_GET_CAPS,
__IDPF_INIT_SW,
__IDPF_STATE_LAST,
};
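
To illustrate, a task walking this state machine might look like the
following hypothetical sketch (in this patch, the actual walk lives in
idpf_vc_core_init(), whose body is not shown in these hunks):

/* Hypothetical illustration of advancing the bring-up state machine */
static void example_init_task(struct idpf_adapter *adapter)
{
	switch (adapter->state) {
	case __IDPF_STARTUP:
		/* Kick off version negotiation with the CP */
		adapter->state = __IDPF_VER_CHECK;
		break;
	case __IDPF_VER_CHECK:
		/* Version agreed; go negotiate capabilities */
		adapter->state = __IDPF_GET_CAPS;
		break;
	case __IDPF_GET_CAPS:
		/* Capabilities received; start SW init */
		adapter->state = __IDPF_INIT_SW;
		break;
	default:
		break;
	}
}
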
@@ -37,6 +54,7 @@ enum idpf_state {
* @IDPF_HR_DRV_LOAD: Set on driver load for a clean HW
* @IDPF_HR_RESET_IN_PROG: Reset in progress
* @IDPF_REMOVE_IN_PROG: Driver remove in progress
* @IDPF_MB_INTR_MODE: Mailbox in interrupt mode
* @IDPF_FLAGS_NBITS: Must be last
*/
enum idpf_flags {
@@ -44,6 +62,7 @@ enum idpf_flags {
IDPF_HR_DRV_LOAD,
IDPF_HR_RESET_IN_PROG,
IDPF_REMOVE_IN_PROG,
IDPF_MB_INTR_MODE,
IDPF_FLAGS_NBITS,
};
@@ -60,11 +79,13 @@ struct idpf_reset_reg {
/**
* struct idpf_reg_ops - Device specific register operation function pointers
* @ctlq_reg_init: Mailbox control queue register initialization
* @mb_intr_reg_init: Mailbox interrupt register initialization
* @reset_reg_init: Reset register initialization
* @trigger_reset: Trigger a reset to occur
*/
struct idpf_reg_ops {
void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq);
void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
void (*reset_reg_init)(struct idpf_adapter *adapter);
void (*trigger_reset)(struct idpf_adapter *adapter,
enum idpf_flags trig_cause);
@@ -78,35 +99,149 @@ struct idpf_dev_ops {
struct idpf_reg_ops reg_ops;
};
/* These macros allow us to generate an enum and a matching char * array of
 * stringified enums that are always in sync. Checkpatch issues a bogus warning
 * about these being complex macros, but they are never used as statements and
 * are instead only used to define the enum and the array.
 */
#define IDPF_FOREACH_VPORT_VC_STATE(STATE) \
STATE(IDPF_VC_ALLOC_VECTORS) \
STATE(IDPF_VC_ALLOC_VECTORS_ERR) \
STATE(IDPF_VC_DEALLOC_VECTORS) \
STATE(IDPF_VC_DEALLOC_VECTORS_ERR) \
STATE(IDPF_VC_NBITS)
#define IDPF_GEN_ENUM(ENUM) ENUM,
#define IDPF_GEN_STRING(STRING) #STRING,
enum idpf_vport_vc_state {
IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_ENUM)
};
extern const char * const idpf_vport_vc_state_str[];
/**
* struct idpf_vport - Handle for netdevices and queue resources
* @vport_id: Device given vport identifier
*/
struct idpf_vport {
u32 vport_id;
};
/**
* struct idpf_vector_lifo - Stack to maintain vector indexes used for vector
* distribution algorithm
* @top: Points to stack top i.e. next available vector index
* @base: Always points to start of the free pool
* @size: Total size of the vector stack
* @vec_idx: Array to store all the vector indexes
*
 * The vector stack maintains all the relative vector indexes at the *adapter*
 * level. The stack is divided into two parts: the 'default pool' and the
 * 'free pool'. The vector distribution algorithm gives priority to the
 * default vports such that at least IDPF_MIN_Q_VEC vectors are allocated per
 * default vport, with the relative vector indexes for those maintained in the
 * default pool. The free pool contains all the unallocated vector indexes,
 * which can be allocated on demand. The mailbox vector index is kept in the
 * default pool of the stack.
*/
struct idpf_vector_lifo {
u16 top;
u16 base;
u16 size;
u16 *vec_idx;
};
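
As an illustration of the intended semantics, hypothetical pop/push
helpers over the free pool could look like this (the names and bounds
checks are assumptions, not code from this patch):

/* Hypothetical helpers showing the LIFO semantics of the free pool */
static int example_stack_pop(struct idpf_vector_lifo *stack)
{
	if (stack->top == stack->size)
		return -EINVAL;	/* free pool exhausted */

	return stack->vec_idx[stack->top++];
}

static void example_stack_push(struct idpf_vector_lifo *stack, u16 vec_idx)
{
	if (stack->top == stack->base)
		return;	/* never push down into the default pool */

	stack->vec_idx[--stack->top] = vec_idx;
}
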
/**
* struct idpf_adapter - Device data struct generated on probe
* @pdev: PCI device struct given on probe
* @virt_ver_maj: Virtchnl version major
* @virt_ver_min: Virtchnl version minor
* @msg_enable: Debug message level enabled
 * @mb_wait_count: Number of times mailbox initialization has been attempted
* @state: Init state machine
* @flags: See enum idpf_flags
* @reset_reg: See struct idpf_reset_reg
* @hw: Device access data
* @num_req_msix: Requested number of MSIX vectors
* @num_avail_msix: Available number of MSIX vectors
* @num_msix_entries: Number of entries in MSIX table
* @msix_entries: MSIX table
* @req_vec_chunks: Requested vector chunk data
* @mb_vector: Mailbox vector data
* @vector_stack: Stack to store the msix vector indexes
* @irq_mb_handler: Handler for hard interrupt for mailbox
* @serv_task: Periodically recurring maintenance task
* @serv_wq: Workqueue for service task
* @mbx_task: Task to handle mailbox interrupts
* @mbx_wq: Workqueue for mailbox responses
* @vc_event_task: Task to handle out of band virtchnl event notifications
* @vc_event_wq: Workqueue for virtchnl events
* @caps: Negotiated capabilities with device
* @vchnl_wq: Wait queue for virtchnl messages
* @vc_state: Virtchnl message state
* @vc_msg: Virtchnl message buffer
* @dev_ops: See idpf_dev_ops
* @vport_ctrl_lock: Lock to protect the vport control flow
* @vector_lock: Lock to protect vector distribution
* @vc_buf_lock: Lock to protect virtchnl buffer
*/
struct idpf_adapter {
struct pci_dev *pdev;
u32 virt_ver_maj;
u32 virt_ver_min;
u32 msg_enable;
u32 mb_wait_count;
enum idpf_state state;
DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS);
struct idpf_reset_reg reset_reg;
struct idpf_hw hw;
u16 num_req_msix;
u16 num_avail_msix;
u16 num_msix_entries;
struct msix_entry *msix_entries;
struct virtchnl2_alloc_vectors *req_vec_chunks;
struct idpf_q_vector mb_vector;
struct idpf_vector_lifo vector_stack;
irqreturn_t (*irq_mb_handler)(int irq, void *data);
struct delayed_work serv_task;
struct workqueue_struct *serv_wq;
struct delayed_work mbx_task;
struct workqueue_struct *mbx_wq;
struct delayed_work vc_event_task;
struct workqueue_struct *vc_event_wq;
struct virtchnl2_get_capabilities caps;
wait_queue_head_t vchnl_wq;
DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
struct idpf_dev_ops dev_ops;
struct mutex vport_ctrl_lock;
struct mutex vector_lock;
struct mutex vc_buf_lock;
};
/**
* idpf_get_reserved_vecs - Get reserved vectors
* @adapter: private data struct
*/
static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter)
{
return le16_to_cpu(adapter->caps.num_allocated_vectors);
}
/**
* idpf_get_default_vports - Get default number of vports
* @adapter: private data struct
*/
static inline u16 idpf_get_default_vports(struct idpf_adapter *adapter)
{
return le16_to_cpu(adapter->caps.default_num_vports);
}
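
These accessors feed the vector budgeting in idpf_intr_req() further
below; schematically (illustrative arithmetic only):

static void example_vec_budget(struct idpf_adapter *adapter)
{
	/* All vectors reserved for this function, per negotiated caps */
	u16 total_vecs = idpf_get_reserved_vecs(adapter);
	/* Queue vectors requested from the CP, minus the mailbox vector */
	u16 num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
	/* Lower bound for the MSIX request to the OS */
	u16 min_vecs = IDPF_MBX_Q_VEC +
		       IDPF_MIN_Q_VEC * idpf_get_default_vports(adapter);
}
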
/**
* idpf_get_reg_addr - Get BAR0 register address
* @adapter: private data struct
@@ -135,10 +270,38 @@ static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter)
adapter->hw.arq->reg.len_mask);
}
/**
* idpf_is_reset_in_prog - check if reset is in progress
* @adapter: driver specific private structure
*
* Returns true if hard reset is in progress, false otherwise
*/
static inline bool idpf_is_reset_in_prog(struct idpf_adapter *adapter)
{
return (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags) ||
test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
test_bit(IDPF_HR_DRV_LOAD, adapter->flags));
}
void idpf_service_task(struct work_struct *work);
void idpf_mbx_task(struct work_struct *work);
void idpf_vc_event_task(struct work_struct *work);
void idpf_dev_ops_init(struct idpf_adapter *adapter);
void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
int idpf_vc_core_init(struct idpf_adapter *adapter);
void idpf_vc_core_deinit(struct idpf_adapter *adapter);
int idpf_intr_req(struct idpf_adapter *adapter);
void idpf_intr_rel(struct idpf_adapter *adapter);
int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
int idpf_get_vec_ids(struct idpf_adapter *adapter,
u16 *vecids, int num_vecids,
struct virtchnl2_vector_chunks *chunks);
int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
void *msg, int msg_size);
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
u16 msg_size, u8 *msg);
#endif /* !_IDPF_H_ */
@@ -44,6 +44,22 @@ static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
}
}
/**
* idpf_mb_intr_reg_init - Initialize mailbox interrupt register
* @adapter: adapter structure
*/
static void idpf_mb_intr_reg_init(struct idpf_adapter *adapter)
{
struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
u32 dyn_ctl = le32_to_cpu(adapter->caps.mailbox_dyn_ctl);
intr->dyn_ctl = idpf_get_reg_addr(adapter, dyn_ctl);
intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M;
intr->dyn_ctl_itridx_m = PF_GLINT_DYN_CTL_ITR_INDX_M;
intr->icr_ena = idpf_get_reg_addr(adapter, PF_INT_DIR_OICR_ENA);
intr->icr_ena_ctlq_m = PF_INT_DIR_OICR_ENA_M;
}
/**
* idpf_reset_reg_init - Initialize reset registers
* @adapter: Driver specific private structure
@@ -76,6 +92,7 @@ static void idpf_trigger_reset(struct idpf_adapter *adapter,
static void idpf_reg_ops_init(struct idpf_adapter *adapter)
{
adapter->dev_ops.reg_ops.ctlq_reg_init = idpf_ctlq_reg_init;
adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_mb_intr_reg_init;
adapter->dev_ops.reg_ops.reset_reg_init = idpf_reset_reg_init;
adapter->dev_ops.reg_ops.trigger_reset = idpf_trigger_reset;
}
......
@@ -53,6 +53,49 @@
#define PF_FW_ATQH_ATQH_M GENMASK(9, 0)
#define PF_FW_ATQT (PF_FW_BASE + 0x24)
/* Interrupts */
#define PF_GLINT_BASE 0x08900000
#define PF_GLINT_DYN_CTL(_INT) (PF_GLINT_BASE + ((_INT) * 0x1000))
#define PF_GLINT_DYN_CTL_INTENA_S 0
#define PF_GLINT_DYN_CTL_INTENA_M BIT(PF_GLINT_DYN_CTL_INTENA_S)
#define PF_GLINT_DYN_CTL_CLEARPBA_S 1
#define PF_GLINT_DYN_CTL_CLEARPBA_M BIT(PF_GLINT_DYN_CTL_CLEARPBA_S)
#define PF_GLINT_DYN_CTL_SWINT_TRIG_S 2
#define PF_GLINT_DYN_CTL_SWINT_TRIG_M BIT(PF_GLINT_DYN_CTL_SWINT_TRIG_S)
#define PF_GLINT_DYN_CTL_ITR_INDX_S 3
#define PF_GLINT_DYN_CTL_ITR_INDX_M GENMASK(4, 3)
#define PF_GLINT_DYN_CTL_INTERVAL_S 5
#define PF_GLINT_DYN_CTL_INTERVAL_M BIT(PF_GLINT_DYN_CTL_INTERVAL_S)
#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S 24
#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S)
#define PF_GLINT_DYN_CTL_SW_ITR_INDX_S 25
#define PF_GLINT_DYN_CTL_SW_ITR_INDX_M BIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_S)
#define PF_GLINT_DYN_CTL_WB_ON_ITR_S 30
#define PF_GLINT_DYN_CTL_WB_ON_ITR_M BIT(PF_GLINT_DYN_CTL_WB_ON_ITR_S)
#define PF_GLINT_DYN_CTL_INTENA_MSK_S 31
#define PF_GLINT_DYN_CTL_INTENA_MSK_M BIT(PF_GLINT_DYN_CTL_INTENA_MSK_S)
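
As a usage note, these BIT()/GENMASK() fields are typically composed
into a register value with FIELD_PREP() from <linux/bitfield.h>; a
hedged sketch, not part of this patch:

static void example_glint_enable(struct idpf_adapter *adapter, int vec_idx)
{
	/* Enable the vector and select ITR index 3, i.e. no ITR update */
	u32 val = PF_GLINT_DYN_CTL_INTENA_M |
		  FIELD_PREP(PF_GLINT_DYN_CTL_ITR_INDX_M, 3);

	writel(val, idpf_get_reg_addr(adapter, PF_GLINT_DYN_CTL(vec_idx)));
}
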
/* Generic registers */
#define PF_INT_DIR_OICR_ENA 0x08406000
#define PF_INT_DIR_OICR_ENA_S 0
#define PF_INT_DIR_OICR_ENA_M GENMASK(31, 0)
#define PF_INT_DIR_OICR 0x08406004
#define PF_INT_DIR_OICR_TSYN_EVNT 0
#define PF_INT_DIR_OICR_PHY_TS_0 BIT(1)
#define PF_INT_DIR_OICR_PHY_TS_1 BIT(2)
#define PF_INT_DIR_OICR_CAUSE 0x08406008
#define PF_INT_DIR_OICR_CAUSE_CAUSE_S 0
#define PF_INT_DIR_OICR_CAUSE_CAUSE_M GENMASK(31, 0)
#define PF_INT_PBA_CLEAR 0x0840600C
#define PF_FUNC_RID 0x08406010
#define PF_FUNC_RID_FUNCTION_NUMBER_S 0
#define PF_FUNC_RID_FUNCTION_NUMBER_M GENMASK(2, 0)
#define PF_FUNC_RID_DEVICE_NUMBER_S 3
#define PF_FUNC_RID_DEVICE_NUMBER_M GENMASK(7, 3)
#define PF_FUNC_RID_BUS_NUMBER_S 8
#define PF_FUNC_RID_BUS_NUMBER_M GENMASK(15, 8)
/* Reset registers */
#define PFGEN_RTRIG 0x08407000
#define PFGEN_RTRIG_CORER_S 0
......
@@ -62,4 +62,42 @@
#define VF_QRXB_TAIL_BASE 0x00060000
#define VF_QRXB_TAIL(_QRX) (VF_QRXB_TAIL_BASE + ((_QRX) * 4))
/* Interrupts */
#define VF_INT_DYN_CTL0 0x00005C00
#define VF_INT_DYN_CTL0_INTENA_S 0
#define VF_INT_DYN_CTL0_INTENA_M BIT(VF_INT_DYN_CTL0_INTENA_S)
#define VF_INT_DYN_CTL0_ITR_INDX_S 3
#define VF_INT_DYN_CTL0_ITR_INDX_M GENMASK(4, 3)
#define VF_INT_DYN_CTLN(_INT) (0x00003800 + ((_INT) * 4))
#define VF_INT_DYN_CTLN_EXT(_INT) (0x00070000 + ((_INT) * 4))
#define VF_INT_DYN_CTLN_INTENA_S 0
#define VF_INT_DYN_CTLN_INTENA_M BIT(VF_INT_DYN_CTLN_INTENA_S)
#define VF_INT_DYN_CTLN_CLEARPBA_S 1
#define VF_INT_DYN_CTLN_CLEARPBA_M BIT(VF_INT_DYN_CTLN_CLEARPBA_S)
#define VF_INT_DYN_CTLN_SWINT_TRIG_S 2
#define VF_INT_DYN_CTLN_SWINT_TRIG_M BIT(VF_INT_DYN_CTLN_SWINT_TRIG_S)
#define VF_INT_DYN_CTLN_ITR_INDX_S 3
#define VF_INT_DYN_CTLN_ITR_INDX_M GENMASK(4, 3)
#define VF_INT_DYN_CTLN_INTERVAL_S 5
#define VF_INT_DYN_CTLN_INTERVAL_M BIT(VF_INT_DYN_CTLN_INTERVAL_S)
#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S 24
#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M BIT(VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S)
#define VF_INT_DYN_CTLN_SW_ITR_INDX_S 25
#define VF_INT_DYN_CTLN_SW_ITR_INDX_M BIT(VF_INT_DYN_CTLN_SW_ITR_INDX_S)
#define VF_INT_DYN_CTLN_WB_ON_ITR_S 30
#define VF_INT_DYN_CTLN_WB_ON_ITR_M BIT(VF_INT_DYN_CTLN_WB_ON_ITR_S)
#define VF_INT_DYN_CTLN_INTENA_MSK_S 31
#define VF_INT_DYN_CTLN_INTENA_MSK_M BIT(VF_INT_DYN_CTLN_INTENA_MSK_S)
#define VF_INT_ICR0_ENA1 0x00005000
#define VF_INT_ICR0_ENA1_ADMINQ_S 30
#define VF_INT_ICR0_ENA1_ADMINQ_M BIT(VF_INT_ICR0_ENA1_ADMINQ_S)
#define VF_INT_ICR0_ENA1_RSVD_S 31
#define VF_INT_ICR01 0x00004800
#define VF_QF_HENA(_i) (0x0000C400 + ((_i) * 4))
#define VF_QF_HENA_MAX_INDX 1
#define VF_QF_HKEY(_i) (0x0000CC00 + ((_i) * 4))
#define VF_QF_HKEY_MAX_INDX 12
#define VF_QF_HLUT(_i) (0x0000D000 + ((_i) * 4))
#define VF_QF_HLUT_MAX_INDX 15
#endif
@@ -3,6 +3,340 @@
#include "idpf.h"
const char * const idpf_vport_vc_state_str[] = {
IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
};
/**
 * idpf_init_vector_stack - Fill the MSIX vector stack with vector indexes
* @adapter: private data struct
*
* Return 0 on success, error on failure
*/
static int idpf_init_vector_stack(struct idpf_adapter *adapter)
{
struct idpf_vector_lifo *stack;
u16 min_vec;
u32 i;
mutex_lock(&adapter->vector_lock);
min_vec = adapter->num_msix_entries - adapter->num_avail_msix;
stack = &adapter->vector_stack;
stack->size = adapter->num_msix_entries;
/* Set the base and top to point at the start of the 'free pool' so the
 * unused vectors can be distributed on demand
 */
stack->base = min_vec;
stack->top = min_vec;
stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL);
if (!stack->vec_idx) {
mutex_unlock(&adapter->vector_lock);
return -ENOMEM;
}
for (i = 0; i < stack->size; i++)
stack->vec_idx[i] = i;
mutex_unlock(&adapter->vector_lock);
return 0;
}
/**
 * idpf_deinit_vector_stack - Free the MSIX vector stack
* @adapter: private data struct
*/
static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
{
struct idpf_vector_lifo *stack;
mutex_lock(&adapter->vector_lock);
stack = &adapter->vector_stack;
kfree(stack->vec_idx);
stack->vec_idx = NULL;
mutex_unlock(&adapter->vector_lock);
}
/**
* idpf_mb_intr_rel_irq - Free the IRQ association with the OS
* @adapter: adapter structure
*
 * This will also disable interrupt mode and queue up the mailbox task, which
 * will reschedule itself if not in interrupt mode.
*/
static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
{
clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
free_irq(adapter->msix_entries[0].vector, adapter);
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
}
/**
* idpf_intr_rel - Release interrupt capabilities and free memory
* @adapter: adapter to disable interrupts on
*/
void idpf_intr_rel(struct idpf_adapter *adapter)
{
int err;
if (!adapter->msix_entries)
return;
idpf_mb_intr_rel_irq(adapter);
pci_free_irq_vectors(adapter->pdev);
err = idpf_send_dealloc_vectors_msg(adapter);
if (err)
dev_err(&adapter->pdev->dev,
"Failed to deallocate vectors: %d\n", err);
idpf_deinit_vector_stack(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
}
/**
* idpf_mb_intr_clean - Interrupt handler for the mailbox
* @irq: interrupt number
* @data: pointer to the adapter structure
*/
static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data)
{
struct idpf_adapter *adapter = (struct idpf_adapter *)data;
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
return IRQ_HANDLED;
}
/**
* idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox
* @adapter: adapter to get the hardware address for register write
*/
static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
{
struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
u32 val;
val = intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m;
writel(val, intr->dyn_ctl);
writel(intr->icr_ena_ctlq_m, intr->icr_ena);
}
/**
* idpf_mb_intr_req_irq - Request irq for the mailbox interrupt
* @adapter: adapter structure to pass to the mailbox irq handler
*/
static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
{
struct idpf_q_vector *mb_vector = &adapter->mb_vector;
int irq_num, mb_vidx = 0, err;
irq_num = adapter->msix_entries[mb_vidx].vector;
mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
dev_driver_string(&adapter->pdev->dev),
"Mailbox", mb_vidx);
err = request_irq(irq_num, adapter->irq_mb_handler, 0,
mb_vector->name, adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"IRQ request for mailbox failed, error: %d\n", err);
return err;
}
set_bit(IDPF_MB_INTR_MODE, adapter->flags);
return 0;
}
/**
* idpf_set_mb_vec_id - Set vector index for mailbox
* @adapter: adapter structure to access the vector chunks
*
 * The first vector ID in the vector chunks requested from the CP is reserved
 * for the mailbox.
*/
static void idpf_set_mb_vec_id(struct idpf_adapter *adapter)
{
if (adapter->req_vec_chunks)
adapter->mb_vector.v_idx =
le16_to_cpu(adapter->caps.mailbox_vector_id);
else
adapter->mb_vector.v_idx = 0;
}
/**
* idpf_mb_intr_init - Initialize the mailbox interrupt
* @adapter: adapter structure to store the mailbox vector
*/
static int idpf_mb_intr_init(struct idpf_adapter *adapter)
{
adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter);
adapter->irq_mb_handler = idpf_mb_intr_clean;
return idpf_mb_intr_req_irq(adapter);
}
/**
* idpf_intr_req - Request interrupt capabilities
* @adapter: adapter to enable interrupts on
*
* Returns 0 on success, negative on failure
*/
int idpf_intr_req(struct idpf_adapter *adapter)
{
u16 default_vports = idpf_get_default_vports(adapter);
int num_q_vecs, total_vecs, num_vec_ids;
int min_vectors, v_actual, err;
unsigned int vector;
u16 *vecids;
total_vecs = idpf_get_reserved_vecs(adapter);
num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
if (err) {
dev_err(&adapter->pdev->dev,
"Failed to allocate %d vectors: %d\n", num_q_vecs, err);
return -EAGAIN;
}
min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
total_vecs, PCI_IRQ_MSIX);
if (v_actual < min_vectors) {
dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n",
v_actual);
err = -EAGAIN;
goto send_dealloc_vecs;
}
adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry),
GFP_KERNEL);
if (!adapter->msix_entries) {
err = -ENOMEM;
goto free_irq;
}
idpf_set_mb_vec_id(adapter);
vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
if (!vecids) {
err = -ENOMEM;
goto free_msix;
}
if (adapter->req_vec_chunks) {
struct virtchnl2_vector_chunks *vchunks;
struct virtchnl2_alloc_vectors *ac;
ac = adapter->req_vec_chunks;
vchunks = &ac->vchunks;
num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
vchunks);
if (num_vec_ids < v_actual) {
err = -EINVAL;
goto free_vecids;
}
} else {
int i;
for (i = 0; i < v_actual; i++)
vecids[i] = i;
}
for (vector = 0; vector < v_actual; vector++) {
adapter->msix_entries[vector].entry = vecids[vector];
adapter->msix_entries[vector].vector =
pci_irq_vector(adapter->pdev, vector);
}
adapter->num_req_msix = total_vecs;
adapter->num_msix_entries = v_actual;
/* 'num_avail_msix' is used to distribute excess vectors to the vports
 * after accounting for the minimum vectors required per default vport
 */
adapter->num_avail_msix = v_actual - min_vectors;
/* Fill MSIX vector lifo stack with vector indexes */
err = idpf_init_vector_stack(adapter);
if (err)
goto free_vecids;
err = idpf_mb_intr_init(adapter);
if (err)
goto deinit_vec_stack;
idpf_mb_irq_enable(adapter);
kfree(vecids);
return 0;
deinit_vec_stack:
idpf_deinit_vector_stack(adapter);
free_vecids:
kfree(vecids);
free_msix:
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
free_irq:
pci_free_irq_vectors(adapter->pdev);
send_dealloc_vecs:
idpf_send_dealloc_vectors_msg(adapter);
return err;
}
/**
* idpf_mbx_task - Delayed task to handle mailbox responses
* @work: work_struct handle
*/
void idpf_mbx_task(struct work_struct *work)
{
struct idpf_adapter *adapter;
adapter = container_of(work, struct idpf_adapter, mbx_task.work);
if (test_bit(IDPF_MB_INTR_MODE, adapter->flags))
idpf_mb_irq_enable(adapter);
else
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
msecs_to_jiffies(300));
idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0);
}
/**
 * idpf_service_task - Periodically recurring maintenance task
* @work: work_struct handle to our data
*
*/
void idpf_service_task(struct work_struct *work)
{
struct idpf_adapter *adapter;
adapter = container_of(work, struct idpf_adapter, serv_task.work);
if (idpf_is_reset_detected(adapter) &&
!idpf_is_reset_in_prog(adapter) &&
!test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
dev_info(&adapter->pdev->dev, "HW reset detected\n");
set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
queue_delayed_work(adapter->vc_event_wq,
&adapter->vc_event_task,
msecs_to_jiffies(10));
}
queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
msecs_to_jiffies(300));
}
/**
* idpf_check_reset_complete - check that reset is complete
* @hw: pointer to hw struct
@@ -61,6 +395,7 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
bool is_reset = idpf_is_reset_detected(adapter);
idpf_vc_core_deinit(adapter);
if (!is_reset)
reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);
idpf_deinit_dflt_mbx(adapter);
@@ -80,8 +415,19 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
/* Reset is complete and so start building the driver resources again */
err = idpf_init_dflt_mbx(adapter);
if (err) {
dev_err(dev, "Failed to initialize default mailbox: %d\n", err);
goto unlock_mutex;
}
/* Initialize the state machine, also allocate memory and request
* resources
*/
err = idpf_vc_core_init(adapter);
if (err) {
idpf_deinit_dflt_mbx(adapter);
goto unlock_mutex;
}
unlock_mutex:
mutex_unlock(&adapter->vport_ctrl_lock);
......
@@ -25,12 +25,17 @@ static void idpf_remove(struct pci_dev *pdev)
* end up in bad state.
*/
cancel_delayed_work_sync(&adapter->vc_event_task);
idpf_vc_core_deinit(adapter);
/* Be a good citizen and leave the device clean on exit */
adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);
idpf_deinit_dflt_mbx(adapter);
destroy_workqueue(adapter->serv_wq);
destroy_workqueue(adapter->mbx_wq);
destroy_workqueue(adapter->vc_event_wq);
mutex_destroy(&adapter->vport_ctrl_lock);
mutex_destroy(&adapter->vector_lock);
mutex_destroy(&adapter->vc_buf_lock);
pci_set_drvdata(pdev, NULL);
kfree(adapter);
@@ -125,13 +130,31 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_set_drvdata(pdev, adapter);
adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->serv_wq) {
dev_err(dev, "Failed to allocate service workqueue\n");
err = -ENOMEM;
goto err_free;
}
adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->mbx_wq) {
dev_err(dev, "Failed to allocate mailbox workqueue\n");
err = -ENOMEM;
goto err_mbx_wq_alloc;
}
adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0,
dev_driver_string(dev),
dev_name(dev));
if (!adapter->vc_event_wq) {
dev_err(dev, "Failed to allocate virtchnl event workqueue\n");
err = -ENOMEM;
goto err_vc_event_wq_alloc;
}
/* setup msglvl */
@@ -145,7 +168,13 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
mutex_init(&adapter->vport_ctrl_lock);
mutex_init(&adapter->vector_lock);
mutex_init(&adapter->vc_buf_lock);
init_waitqueue_head(&adapter->vchnl_wq);
INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task);
INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task);
INIT_DELAYED_WORK(&adapter->vc_event_task, idpf_vc_event_task);
adapter->dev_ops.reg_ops.reset_reg_init(adapter);
@@ -157,6 +186,10 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_cfg_hw:
destroy_workqueue(adapter->vc_event_wq);
err_vc_event_wq_alloc:
destroy_workqueue(adapter->mbx_wq);
err_mbx_wq_alloc:
destroy_workqueue(adapter->serv_wq);
err_free:
kfree(adapter);
return err;
......
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2023 Intel Corporation */
#ifndef _IDPF_TXRX_H_
#define _IDPF_TXRX_H_
/* Default vector sharing */
#define IDPF_MBX_Q_VEC 1
#define IDPF_MIN_Q_VEC 1
/**
* struct idpf_intr_reg
 * @dyn_ctl: Dynamic control interrupt register address
 * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
 * @dyn_ctl_itridx_m: Mask for ITR index
 * @icr_ena: Interrupt cause register address
 * @icr_ena_ctlq_m: Mask for control queue cause in ICR enable
*/
struct idpf_intr_reg {
void __iomem *dyn_ctl;
u32 dyn_ctl_intena_m;
u32 dyn_ctl_itridx_m;
void __iomem *icr_ena;
u32 icr_ena_ctlq_m;
};
/**
* struct idpf_q_vector
* @v_idx: Vector index
* @intr_reg: See struct idpf_intr_reg
* @name: Queue vector name
*/
struct idpf_q_vector {
u16 v_idx;
struct idpf_intr_reg intr_reg;
char *name;
};
#endif /* !_IDPF_TXRX_H_ */
@@ -44,6 +44,22 @@ static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
}
}
/**
 * idpf_vf_mb_intr_reg_init - Initialize the mailbox interrupt register
* @adapter: adapter structure
*/
static void idpf_vf_mb_intr_reg_init(struct idpf_adapter *adapter)
{
struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
u32 dyn_ctl = le32_to_cpu(adapter->caps.mailbox_dyn_ctl);
intr->dyn_ctl = idpf_get_reg_addr(adapter, dyn_ctl);
intr->dyn_ctl_intena_m = VF_INT_DYN_CTL0_INTENA_M;
intr->dyn_ctl_itridx_m = VF_INT_DYN_CTL0_ITR_INDX_M;
intr->icr_ena = idpf_get_reg_addr(adapter, VF_INT_ICR0_ENA1);
intr->icr_ena_ctlq_m = VF_INT_ICR0_ENA1_ADMINQ_M;
}
/**
* idpf_vf_reset_reg_init - Initialize reset registers
* @adapter: Driver specific private structure
@@ -62,7 +78,10 @@ static void idpf_vf_reset_reg_init(struct idpf_adapter *adapter)
static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
enum idpf_flags trig_cause)
{
/* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */
if (trig_cause == IDPF_HR_FUNC_RESET &&
!test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL);
}
/**
@@ -72,6 +91,7 @@ static void idpf_vf_reg_ops_init(struct idpf_adapter *adapter)
static void idpf_vf_reg_ops_init(struct idpf_adapter *adapter)
{
adapter->dev_ops.reg_ops.ctlq_reg_init = idpf_vf_ctlq_reg_init;
adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_vf_mb_intr_reg_init;
adapter->dev_ops.reg_ops.reset_reg_init = idpf_vf_reset_reg_init;
adapter->dev_ops.reg_ops.trigger_reset = idpf_vf_trigger_reset;
}
......