Commit e1ffcc66 authored by Dimitris Michailidis, committed by David S. Miller

net/fungible: Add service module for Fungible drivers

Fungible cards have a number of different PCI functions and thus
different drivers, all of which use a common method to initialize and
interact with the device. This commit adds a library module that
collects these common mechanisms. They mainly deal with device
initialization, setting up and destroying queues, and operating an admin
queue. A subset of the FW interface is also included here.
Signed-off-by: Dimitris Michailidis <dmichail@fungible.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e8eb9e32
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
obj-$(CONFIG_FUN_CORE) += funcore.o
funcore-y := fun_dev.o fun_queue.o
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
#ifndef _FUNDEV_H
#define _FUNDEV_H
#include <linux/sbitmap.h>
#include <linux/spinlock_types.h>
#include <linux/workqueue.h>
#include "fun_hci.h"
struct pci_dev;
struct fun_dev;
struct fun_queue;
struct fun_cmd_ctx;
struct fun_queue_alloc_req;
/* doorbell fields */
enum {
	FUN_DB_QIDX_S = 0,
	FUN_DB_INTCOAL_ENTRIES_S = 16,
	FUN_DB_INTCOAL_ENTRIES_M = 0x7f,
	FUN_DB_INTCOAL_USEC_S = 23,
	FUN_DB_INTCOAL_USEC_M = 0x7f,
	FUN_DB_IRQ_S = 30,
	FUN_DB_IRQ_F = 1 << FUN_DB_IRQ_S,
	FUN_DB_IRQ_ARM_S = 31,
	FUN_DB_IRQ_ARM_F = 1U << FUN_DB_IRQ_ARM_S
};
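These fields describe the layout of a 32-bit doorbell word: the queue index occupies the low bits, the interrupt-coalescing entry count and microsecond values sit at bits 16 and 23, and bits 30/31 carry the IRQ and IRQ-arm flags. A minimal sketch of packing such a word from these constants (the helper name and parameter choices are illustrative, not part of this header):

/* Illustrative only: pack a CQ head index and interrupt-coalescing
 * parameters into a single 32-bit doorbell word using the fields above.
 */
static u32 example_cq_db_val(u16 head, u8 coal_nentries, u8 coal_usec,
			     bool arm_irq)
{
	u32 db = (u32)head << FUN_DB_QIDX_S;

	db |= (coal_nentries & FUN_DB_INTCOAL_ENTRIES_M) <<
	      FUN_DB_INTCOAL_ENTRIES_S;
	db |= (coal_usec & FUN_DB_INTCOAL_USEC_M) << FUN_DB_INTCOAL_USEC_S;
	if (arm_irq)
		db |= FUN_DB_IRQ_ARM_F;
	return db;
}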
/* Callback for asynchronous admin commands.
* Invoked on reception of command response.
*/
typedef void (*fun_admin_callback_t)(struct fun_dev *fdev, void *rsp,
				     void *cb_data);
/* Callback for events/notifications received by an admin queue. */
typedef void (*fun_admin_event_cb)(struct fun_dev *fdev, void *cqe);
/* Callback for pending work handled by the service task. */
typedef void (*fun_serv_cb)(struct fun_dev *fd);
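As a sketch of how these hooks might be used (the function names and bodies below are hypothetical, not part of this module): an asynchronous command callback typically just hands the response to whoever submitted the command, while the event and service callbacks are driver-specific.

/* Illustrative only: an async admin command callback that wakes the
 * submitter.  cb_data is the cookie passed to fun_submit_admin_cmd();
 * here it is assumed to be a struct completion (<linux/completion.h>).
 */
static void my_admin_cmd_done(struct fun_dev *fdev, void *rsp, void *cb_data)
{
	struct completion *done = cb_data;

	/* rsp points at the command's response in the admin CQ */
	complete(done);
}

/* Hypothetical admin queue event and service task callbacks. */
static void my_adminq_event(struct fun_dev *fdev, void *cqe)
{
	/* asynchronous notification delivered through the admin CQ */
}

static void my_service_task(struct fun_dev *fdev)
{
	/* deferred work; runs in process context via the service task */
}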
/* service task flags */
enum {
	FUN_SERV_DISABLED,	/* service task is disabled */
	FUN_SERV_FIRST_AVAIL
};
/* Driver state associated with a PCI function. */
struct fun_dev {
	struct device *dev;

	void __iomem *bar;		/* start of BAR0 mapping */
	u32 __iomem *dbs;		/* start of doorbells in BAR0 mapping */

	/* admin queue */
	struct fun_queue *admin_q;
	struct sbitmap_queue admin_sbq;
	struct fun_cmd_ctx *cmd_ctx;
	fun_admin_event_cb adminq_cb;
	bool suppress_cmds;		/* if set don't write commands to SQ */

	/* address increment between consecutive doorbells, in 4B units */
	unsigned int db_stride;

	/* SW versions of device registers */
	u32 cc_reg;			/* CC register */
	u64 cap_reg;			/* CAPability register */

	unsigned int q_depth;		/* max queue depth supported by device */
	unsigned int max_qid;		/* = #queues - 1, separately for SQs and CQs */
	unsigned int kern_end_qid;	/* last qid in the kernel range + 1 */

	unsigned int fw_handle;

	/* IRQ manager */
	unsigned int num_irqs;
	unsigned int irqs_avail;
	spinlock_t irqmgr_lock;
	unsigned long *irq_map;

	/* The service task handles work that needs a process context */
	struct work_struct service_task;
	unsigned long service_flags;
	fun_serv_cb serv_cb;
};
struct fun_dev_params {
	u8  cqe_size_log2;	/* admin q CQE size */
	u8  sqe_size_log2;	/* admin q SQE size */

	/* admin q depths */
	u16 cq_depth;
	u16 sq_depth;
	u16 rq_depth;

	u16 min_msix;		/* min vectors needed by requesting driver */

	fun_admin_event_cb event_cb;
	fun_serv_cb serv_cb;
};
/* Return the BAR address of a doorbell. */
static inline u32 __iomem *fun_db_addr(const struct fun_dev *fdev,
				       unsigned int db_index)
{
	return &fdev->dbs[db_index * fdev->db_stride];
}

/* Return the BAR address of an SQ doorbell. SQ and CQ DBs alternate,
 * SQs have even DB indices.
 */
static inline u32 __iomem *fun_sq_db_addr(const struct fun_dev *fdev,
					  unsigned int sqid)
{
	return fun_db_addr(fdev, sqid * 2);
}

static inline u32 __iomem *fun_cq_db_addr(const struct fun_dev *fdev,
					  unsigned int cqid)
{
	return fun_db_addr(fdev, cqid * 2 + 1);
}
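A small sketch of ringing both doorbells of I/O queue pair qid with these helpers (the wrapper function, the decision to re-arm the CQ interrupt, and the use of writel() from <linux/io.h> are assumptions for the example):

/* Illustrative only: ring the doorbells of I/O queue pair 'qid' after
 * producing a new SQ tail and consuming completions up to 'cq_head'.
 */
static void example_ring_dbs(struct fun_dev *fdev, unsigned int qid,
			     u16 sq_tail, u16 cq_head)
{
	writel(sq_tail, fun_sq_db_addr(fdev, qid));
	writel(cq_head | FUN_DB_IRQ_ARM_F, fun_cq_db_addr(fdev, qid));
}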
int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res);
int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res,
		    unsigned int flags, u32 id);
int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
	     unsigned int id0, enum fun_admin_bind_type type1,
	     unsigned int id1);

int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
			 fun_admin_callback_t cb, void *cb_data, bool wait_ok);
int fun_submit_admin_sync_cmd(struct fun_dev *fdev,
			      struct fun_admin_req_common *cmd, void *rsp,
			      size_t rspsize, unsigned int timeout);
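A hedged sketch of the synchronous admin path using the two simplest wrappers above; the function name, the choice of resource type, and the assumption that a negative return means failure are illustrative:

/* Illustrative only: count this function's endpoint SQs and destroy one
 * by id.  Both wrappers go through the admin queue synchronously.
 */
static int example_drop_sq(struct fun_dev *fdev, u32 sqid)
{
	int nsqs = fun_get_res_count(fdev, FUN_ADMIN_OP_EPSQ);

	if (nsqs <= 0)
		return nsqs ? nsqs : -ENODEV;

	return fun_res_destroy(fdev, FUN_ADMIN_OP_EPSQ, 0, sqid);
}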
int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
		   const struct fun_dev_params *areq, const char *name);
void fun_dev_disable(struct fun_dev *fdev);

int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs,
		     u16 *irq_indices);
void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs,
		      u16 *irq_indices);

void fun_serv_stop(struct fun_dev *fd);
void fun_serv_restart(struct fun_dev *fd);
void fun_serv_sched(struct fun_dev *fd);
#endif /* _FUNDEV_H */
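A rough sketch of probe-time bring-up built on this header: every field value is made up, and the callbacks are the hypothetical stubs from the earlier sketch.

/* Rough sketch of probe-time initialization; all values illustrative. */
static int example_probe(struct pci_dev *pdev, struct fun_dev *fdev)
{
	struct fun_dev_params params = {
		.cqe_size_log2	= ilog2(64),	/* 64B admin CQEs (made up) */
		.sqe_size_log2	= ilog2(64),	/* 64B admin SQEs (made up) */
		.cq_depth	= 64,
		.sq_depth	= 64,
		.rq_depth	= 16,
		.min_msix	= 2,
		.event_cb	= my_adminq_event,	/* earlier sketch */
		.serv_cb	= my_service_task,	/* earlier sketch */
	};
	int err;

	err = fun_dev_enable(fdev, pdev, &params, KBUILD_MODNAME);
	if (err)
		return err;

	/* ... create I/O queues, reserve IRQs with fun_reserve_irqs(), ... */
	return 0;
}

Teardown is the mirror image: fun_dev_disable(fdev) releases what fun_dev_enable() set up.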
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
#ifndef _FUN_QEUEUE_H
#define _FUN_QEUEUE_H
#include <linux/interrupt.h>
#include <linux/io.h>
struct device;
struct fun_dev;
struct fun_queue;
struct fun_cqe_info;
struct fun_rsp_common;
typedef void (*cq_callback_t)(struct fun_queue *funq, void *data, void *msg,
			      const struct fun_cqe_info *info);

struct fun_rq_info {
	dma_addr_t dma;
	struct page *page;
};
/* A queue group consisting of an SQ, a CQ, and an optional RQ. */
struct fun_queue {
	struct fun_dev *fdev;
	spinlock_t sq_lock;

	dma_addr_t cq_dma_addr;
	dma_addr_t sq_dma_addr;
	dma_addr_t rq_dma_addr;

	u32 __iomem *cq_db;
	u32 __iomem *sq_db;
	u32 __iomem *rq_db;

	void *cqes;
	void *sq_cmds;
	struct fun_eprq_rqbuf *rqes;
	struct fun_rq_info *rq_info;

	u32 cqid;
	u32 sqid;
	u32 rqid;

	u32 cq_depth;
	u32 sq_depth;
	u32 rq_depth;

	u16 cq_head;
	u16 sq_tail;
	u16 rq_tail;

	u8 cqe_size_log2;
	u8 sqe_size_log2;

	u16 cqe_info_offset;

	u16 rq_buf_idx;
	int rq_buf_offset;
	u16 num_rqe_to_fill;

	u8 cq_intcoal_usec;
	u8 cq_intcoal_nentries;
	u8 sq_intcoal_usec;
	u8 sq_intcoal_nentries;

	u16 cq_flags;
	u16 sq_flags;
	u16 rq_flags;

	/* SQ head writeback */
	u16 sq_comp;
	volatile __be64 *sq_head;

	cq_callback_t cq_cb;
	void *cb_data;

	irq_handler_t irq_handler;
	void *irq_data;
	s16 cq_vector;
	u8 cq_phase;

	/* I/O q index */
	u16 qid;

	char irqname[24];
};
static inline void *fun_sqe_at(const struct fun_queue *funq, unsigned int pos)
{
	return funq->sq_cmds + (pos << funq->sqe_size_log2);
}

static inline void funq_sq_post_tail(struct fun_queue *funq, u16 tail)
{
	if (++tail == funq->sq_depth)
		tail = 0;
	funq->sq_tail = tail;
	writel(tail, funq->sq_db);
}

static inline struct fun_cqe_info *funq_cqe_info(const struct fun_queue *funq,
						 void *cqe)
{
	return cqe + funq->cqe_info_offset;
}

static inline void funq_rq_post(struct fun_queue *funq)
{
	writel(funq->rq_tail, funq->rq_db);
}
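A sketch of posting one command with these helpers, assuming the caller has already verified there is a free SQ slot (for example against the head writeback at funq->sq_head); the wrapper name is illustrative:

/* Illustrative only: copy a prebuilt command into the next SQ slot and
 * ring the SQ doorbell.  'cmd_size' must not exceed the SQE size.
 */
static void example_post_cmd(struct fun_queue *funq, const void *cmd,
			     size_t cmd_size)
{
	unsigned long flags;

	spin_lock_irqsave(&funq->sq_lock, flags);
	memcpy(fun_sqe_at(funq, funq->sq_tail), cmd, cmd_size);
	funq_sq_post_tail(funq, funq->sq_tail);
	spin_unlock_irqrestore(&funq->sq_lock, flags);
}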
struct fun_queue_alloc_req {
	u8  cqe_size_log2;
	u8  sqe_size_log2;

	u16 cq_flags;
	u16 sq_flags;
	u16 rq_flags;

	u32 cq_depth;
	u32 sq_depth;
	u32 rq_depth;

	u8 cq_intcoal_usec;
	u8 cq_intcoal_nentries;
	u8 sq_intcoal_usec;
	u8 sq_intcoal_nentries;
};
int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
		  u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
		  u8 coal_nentries, u8 coal_usec, u32 irq_num,
		  u32 scan_start_id, u32 scan_end_id,
		  u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp);
int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
		  u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
		  u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
		  u32 irq_num, u32 scan_start_id, u32 scan_end_id,
		  u32 *cqidp, u32 __iomem **dbp);

void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
			 size_t hw_desc_sz, size_t sw_desc_size, bool wb,
			 int numa_node, dma_addr_t *dma_addr, void **sw_va,
			 volatile __be64 **wb_va);
void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
		       bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va);

#define fun_destroy_sq(fdev, sqid) \
	fun_res_destroy((fdev), FUN_ADMIN_OP_EPSQ, 0, (sqid))
#define fun_destroy_cq(fdev, cqid) \
	fun_res_destroy((fdev), FUN_ADMIN_OP_EPCQ, 0, (cqid))
struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
				  const struct fun_queue_alloc_req *req);
void fun_free_queue(struct fun_queue *funq);

static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb,
				       void *cb_data)
{
	funq->cq_cb = cb;
	funq->cb_data = cb_data;
}

int fun_create_rq(struct fun_queue *funq);
int fun_create_queue(struct fun_queue *funq);

void fun_free_irq(struct fun_queue *funq);
int fun_request_irq(struct fun_queue *funq, const char *devname,
		    irq_handler_t handler, void *data);

unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max);
unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max);
#endif /* _FUN_QEUEUE_H */
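To tie the queue API together, a speculative sketch of creating one queue group and servicing its CQ from the IRQ handler. The function names, entry sizes, depths, and the omitted IRQ-vector selection are all assumptions made for the example.

/* Per-CQE handler installed with fun_set_cq_callback(); body is a stub. */
static void my_cq_done(struct fun_queue *funq, void *data, void *msg,
		       const struct fun_cqe_info *info)
{
	/* msg points at the CQE payload, info at its completion metadata */
}

static irqreturn_t example_irq(int irq, void *data)
{
	struct fun_queue *funq = data;

	fun_process_cq(funq, funq->cq_depth);	/* drain up to a full CQ */
	return IRQ_HANDLED;
}

static struct fun_queue *example_create_queue(struct fun_dev *fdev, int qid)
{
	struct fun_queue_alloc_req req = {
		.cqe_size_log2	= ilog2(64),	/* made-up entry sizes */
		.sqe_size_log2	= ilog2(64),
		.cq_depth	= 256,		/* made-up depths */
		.sq_depth	= 256,
	};
	struct fun_queue *funq;

	funq = fun_alloc_queue(fdev, qid, &req);
	if (!funq)
		return NULL;

	/* IRQ vector selection (funq->cq_vector) is omitted here. */
	fun_set_cq_callback(funq, my_cq_done, NULL);
	if (fun_request_irq(funq, "example", example_irq, funq))
		goto free_queue;
	if (fun_create_queue(funq))	/* create the queues on the device */
		goto free_irq;
	return funq;

free_irq:
	fun_free_irq(funq);
free_queue:
	fun_free_queue(funq);
	return NULL;
}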