Commit ed8ccaef authored by Tadeusz Struk's avatar Tadeusz Struk Committed by Herbert Xu

crypto: qat - Add support for SRIOV

Add code that enables SRIOV on dh895xcc devices.
Signed-off-by: default avatarTadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: default avatarHerbert Xu <herbert@gondor.apana.org.au>
parent a5733139
...@@ -19,3 +19,4 @@ intel_qat-objs := adf_cfg.o \ ...@@ -19,3 +19,4 @@ intel_qat-objs := adf_cfg.o \
qat_hal.o qat_hal.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o
...@@ -46,13 +46,17 @@ ...@@ -46,13 +46,17 @@
*/ */
#ifndef ADF_ACCEL_DEVICES_H_ #ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_ #define ADF_ACCEL_DEVICES_H_
#include <linux/interrupt.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/ratelimit.h>
#include "adf_cfg_common.h" #include "adf_cfg_common.h"
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc" #define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435 #define ADF_DH895XCC_PCI_DEVICE_ID 0x435
#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
#define ADF_PCI_MAX_BARS 3 #define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32 #define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16 #define ADF_ETR_MAX_RINGS_PER_BANK 16
...@@ -79,6 +83,7 @@ struct adf_bar { ...@@ -79,6 +83,7 @@ struct adf_bar {
struct adf_accel_msix { struct adf_accel_msix {
struct msix_entry *entries; struct msix_entry *entries;
char **names; char **names;
u32 num_entries;
} __packed; } __packed;
struct adf_accel_pci { struct adf_accel_pci {
...@@ -99,6 +104,7 @@ enum dev_sku_info { ...@@ -99,6 +104,7 @@ enum dev_sku_info {
DEV_SKU_2, DEV_SKU_2,
DEV_SKU_3, DEV_SKU_3,
DEV_SKU_4, DEV_SKU_4,
DEV_SKU_VF,
DEV_SKU_UNKNOWN, DEV_SKU_UNKNOWN,
}; };
...@@ -113,6 +119,8 @@ static inline const char *get_sku_info(enum dev_sku_info info) ...@@ -113,6 +119,8 @@ static inline const char *get_sku_info(enum dev_sku_info info)
return "SKU3"; return "SKU3";
case DEV_SKU_4: case DEV_SKU_4:
return "SKU4"; return "SKU4";
case DEV_SKU_VF:
return "SKUVF";
case DEV_SKU_UNKNOWN: case DEV_SKU_UNKNOWN:
default: default:
break; break;
...@@ -140,6 +148,8 @@ struct adf_hw_device_data { ...@@ -140,6 +148,8 @@ struct adf_hw_device_data {
uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self); uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
uint32_t (*get_num_aes)(struct adf_hw_device_data *self); uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
uint32_t (*get_num_accels)(struct adf_hw_device_data *self); uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
uint32_t (*get_pf2vf_offset)(uint32_t i);
uint32_t (*get_vintmsk_offset)(uint32_t i);
enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self); enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
int (*alloc_irq)(struct adf_accel_dev *accel_dev); int (*alloc_irq)(struct adf_accel_dev *accel_dev);
void (*free_irq)(struct adf_accel_dev *accel_dev); void (*free_irq)(struct adf_accel_dev *accel_dev);
...@@ -151,7 +161,9 @@ struct adf_hw_device_data { ...@@ -151,7 +161,9 @@ struct adf_hw_device_data {
void (*exit_arb)(struct adf_accel_dev *accel_dev); void (*exit_arb)(struct adf_accel_dev *accel_dev);
void (*get_arb_mapping)(struct adf_accel_dev *accel_dev, void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
const uint32_t **cfg); const uint32_t **cfg);
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*enable_ints)(struct adf_accel_dev *accel_dev); void (*enable_ints)(struct adf_accel_dev *accel_dev);
int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
const char *fw_name; const char *fw_name;
const char *fw_mmp_name; const char *fw_mmp_name;
uint32_t fuses; uint32_t fuses;
...@@ -165,6 +177,7 @@ struct adf_hw_device_data { ...@@ -165,6 +177,7 @@ struct adf_hw_device_data {
uint8_t num_accel; uint8_t num_accel;
uint8_t num_logical_accel; uint8_t num_logical_accel;
uint8_t num_engines; uint8_t num_engines;
uint8_t min_iov_compat_ver;
} __packed; } __packed;
/* CSR write macro */ /* CSR write macro */
...@@ -189,6 +202,15 @@ struct adf_fw_loader_data { ...@@ -189,6 +202,15 @@ struct adf_fw_loader_data {
const struct firmware *mmp_fw; const struct firmware *mmp_fw;
}; };
struct adf_accel_vf_info {
struct adf_accel_dev *accel_dev;
struct tasklet_struct vf2pf_bh_tasklet;
struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
struct ratelimit_state vf2pf_ratelimit;
u32 vf_nr;
bool init;
};
struct adf_accel_dev { struct adf_accel_dev {
struct adf_etr_data *transport; struct adf_etr_data *transport;
struct adf_hw_device_data *hw_device; struct adf_hw_device_data *hw_device;
...@@ -202,6 +224,21 @@ struct adf_accel_dev { ...@@ -202,6 +224,21 @@ struct adf_accel_dev {
struct list_head list; struct list_head list;
struct module *owner; struct module *owner;
struct adf_accel_pci accel_pci_dev; struct adf_accel_pci accel_pci_dev;
union {
struct {
/* vf_info is non-zero when SR-IOV is init'ed */
struct adf_accel_vf_info *vf_info;
} pf;
struct {
char *irq_name;
struct tasklet_struct pf2vf_bh_tasklet;
struct mutex vf2pf_lock; /* protect CSR access */
struct completion iov_msg_completion;
uint8_t compatible;
uint8_t pf_version;
} vf;
};
bool is_vf;
uint8_t accel_id; uint8_t accel_id;
} __packed; } __packed;
#endif #endif
...@@ -91,6 +91,9 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev) ...@@ -91,6 +91,9 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n", dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
accel_dev->accel_id); accel_dev->accel_id);
if (!parent)
parent = pdev;
if (!pci_wait_for_pending_transaction(pdev)) if (!pci_wait_for_pending_transaction(pdev))
dev_info(&GET_DEV(accel_dev), dev_info(&GET_DEV(accel_dev),
"Transaction still in progress. Proceeding\n"); "Transaction still in progress. Proceeding\n");
......
...@@ -178,6 +178,9 @@ void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev) ...@@ -178,6 +178,9 @@ void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
{ {
struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg; struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
if (!dev_cfg_data)
return;
down_write(&dev_cfg_data->lock); down_write(&dev_cfg_data->lock);
adf_cfg_section_del_all(&dev_cfg_data->sec_list); adf_cfg_section_del_all(&dev_cfg_data->sec_list);
up_write(&dev_cfg_data->lock); up_write(&dev_cfg_data->lock);
......
...@@ -60,7 +60,7 @@ ...@@ -60,7 +60,7 @@
#define ADF_CFG_NO_DEVICE 0xFF #define ADF_CFG_NO_DEVICE 0xFF
#define ADF_CFG_AFFINITY_WHATEVER 0xFF #define ADF_CFG_AFFINITY_WHATEVER 0xFF
#define MAX_DEVICE_NAME_SIZE 32 #define MAX_DEVICE_NAME_SIZE 32
#define ADF_MAX_DEVICES 32 #define ADF_MAX_DEVICES (32 * 32)
enum adf_cfg_val_type { enum adf_cfg_val_type {
ADF_DEC, ADF_DEC,
...@@ -71,6 +71,7 @@ enum adf_cfg_val_type { ...@@ -71,6 +71,7 @@ enum adf_cfg_val_type {
enum adf_device_type { enum adf_device_type {
DEV_UNKNOWN = 0, DEV_UNKNOWN = 0,
DEV_DH895XCC, DEV_DH895XCC,
DEV_DH895XCCVF,
}; };
struct adf_dev_status_info { struct adf_dev_status_info {
......
...@@ -54,8 +54,8 @@ ...@@ -54,8 +54,8 @@
#include "icp_qat_hal.h" #include "icp_qat_hal.h"
#define ADF_MAJOR_VERSION 0 #define ADF_MAJOR_VERSION 0
#define ADF_MINOR_VERSION 1 #define ADF_MINOR_VERSION 2
#define ADF_BUILD_VERSION 4 #define ADF_BUILD_VERSION 0
#define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \ #define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \
__stringify(ADF_MINOR_VERSION) "." \ __stringify(ADF_MINOR_VERSION) "." \
__stringify(ADF_BUILD_VERSION) __stringify(ADF_BUILD_VERSION)
...@@ -95,7 +95,7 @@ struct service_hndl { ...@@ -95,7 +95,7 @@ struct service_hndl {
static inline int get_current_node(void) static inline int get_current_node(void)
{ {
return cpu_data(current_thread_info()->cpu).phys_proc_id; return topology_physical_package_id(smp_processor_id());
} }
int adf_service_register(struct service_hndl *service); int adf_service_register(struct service_hndl *service);
...@@ -106,13 +106,23 @@ int adf_dev_start(struct adf_accel_dev *accel_dev); ...@@ -106,13 +106,23 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
int adf_dev_stop(struct adf_accel_dev *accel_dev); int adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev); void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
void adf_clean_vf_map(bool);
int adf_ctl_dev_register(void); int adf_ctl_dev_register(void);
void adf_ctl_dev_unregister(void); void adf_ctl_dev_unregister(void);
int adf_processes_dev_register(void); int adf_processes_dev_register(void);
void adf_processes_dev_unregister(void); void adf_processes_dev_unregister(void);
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev); int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev); struct adf_accel_dev *pf);
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf);
struct list_head *adf_devmgr_get_head(void); struct list_head *adf_devmgr_get_head(void);
struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id); struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
struct adf_accel_dev *adf_devmgr_get_first(void); struct adf_accel_dev *adf_devmgr_get_first(void);
...@@ -211,4 +221,21 @@ int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, ...@@ -211,4 +221,21 @@ int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size); void *addr_ptr, int mem_size);
void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size); void *addr_ptr, int mem_size);
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
uint32_t vf_mask);
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
uint32_t vf_mask);
#else
static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
return 0;
}
static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
}
#endif
#endif #endif
...@@ -398,10 +398,9 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd, ...@@ -398,10 +398,9 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
} }
accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id); accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
if (!accel_dev) { if (!accel_dev)
pr_err("QAT: Device %d not found\n", dev_info.accel_id);
return -ENODEV; return -ENODEV;
}
hw_data = accel_dev->hw_device; hw_data = accel_dev->hw_device;
dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN; dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
dev_info.num_ae = hw_data->get_num_aes(hw_data); dev_info.num_ae = hw_data->get_num_aes(hw_data);
...@@ -495,6 +494,7 @@ static void __exit adf_unregister_ctl_device_driver(void) ...@@ -495,6 +494,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
adf_exit_aer(); adf_exit_aer();
qat_crypto_unregister(); qat_crypto_unregister();
qat_algs_exit(); qat_algs_exit();
adf_clean_vf_map(false);
mutex_destroy(&adf_ctl_lock); mutex_destroy(&adf_ctl_lock);
} }
......
...@@ -50,21 +50,125 @@ ...@@ -50,21 +50,125 @@
#include "adf_common_drv.h" #include "adf_common_drv.h"
static LIST_HEAD(accel_table); static LIST_HEAD(accel_table);
static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock); static DEFINE_MUTEX(table_lock);
static uint32_t num_devices; static uint32_t num_devices;
struct vf_id_map {
u32 bdf;
u32 id;
u32 fake_id;
bool attached;
struct list_head list;
};
static int adf_get_vf_id(struct adf_accel_dev *vf)
{
return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
(PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
}
static int adf_get_vf_num(struct adf_accel_dev *vf)
{
return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
}
static struct vf_id_map *adf_find_vf(u32 bdf)
{
struct list_head *itr;
list_for_each(itr, &vfs_table) {
struct vf_id_map *ptr =
list_entry(itr, struct vf_id_map, list);
if (ptr->bdf == bdf)
return ptr;
}
return NULL;
}
static int adf_get_vf_real_id(u32 fake)
{
struct list_head *itr;
list_for_each(itr, &vfs_table) {
struct vf_id_map *ptr =
list_entry(itr, struct vf_id_map, list);
if (ptr->fake_id == fake)
return ptr->id;
}
return -1;
}
/**
* adf_clean_vf_map() - Cleans VF id mapings
*
* Function cleans internal ids for virtual functions.
* @vf: flag indicating whether mappings is cleaned
* for vfs only or for vfs and pfs
*/
void adf_clean_vf_map(bool vf)
{
struct vf_id_map *map;
struct list_head *ptr, *tmp;
mutex_lock(&table_lock);
list_for_each_safe(ptr, tmp, &vfs_table) {
map = list_entry(ptr, struct vf_id_map, list);
if (map->bdf != -1)
num_devices--;
if (vf && map->bdf == -1)
continue;
list_del(ptr);
kfree(map);
}
mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_clean_vf_map);
/**
* adf_devmgr_update_class_index() - Update internal index
* @hw_data: Pointer to internal device data.
*
* Function updates internal dev index for VFs
*/
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
{
struct adf_hw_device_class *class = hw_data->dev_class;
struct list_head *itr;
int i = 0;
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
if (ptr->hw_device->dev_class == class)
ptr->hw_device->instance_id = i++;
if (i == class->instances)
break;
}
}
EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
/** /**
* adf_devmgr_add_dev() - Add accel_dev to the acceleration framework * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
* @accel_dev: Pointer to acceleration device. * @accel_dev: Pointer to acceleration device.
* @pf: Corresponding PF if the accel_dev is a VF
* *
* Function adds acceleration device to the acceleration framework. * Function adds acceleration device to the acceleration framework.
* To be used by QAT device specific drivers. * To be used by QAT device specific drivers.
* *
* Return: 0 on success, error code otherwise. * Return: 0 on success, error code otherwise.
*/ */
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev) int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf)
{ {
struct list_head *itr; struct list_head *itr;
int ret = 0;
if (num_devices == ADF_MAX_DEVICES) { if (num_devices == ADF_MAX_DEVICES) {
dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n", dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
...@@ -73,20 +177,77 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev) ...@@ -73,20 +177,77 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
} }
mutex_lock(&table_lock); mutex_lock(&table_lock);
list_for_each(itr, &accel_table) { atomic_set(&accel_dev->ref_count, 0);
struct adf_accel_dev *ptr =
/* PF on host or VF on guest */
if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
struct vf_id_map *map;
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list); list_entry(itr, struct adf_accel_dev, list);
if (ptr == accel_dev) { if (ptr == accel_dev) {
mutex_unlock(&table_lock); ret = -EEXIST;
return -EEXIST; goto unlock;
}
} }
list_add_tail(&accel_dev->list, &accel_table);
accel_dev->accel_id = num_devices++;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
ret = -ENOMEM;
goto unlock;
}
map->bdf = ~0;
map->id = accel_dev->accel_id;
map->fake_id = map->id;
map->attached = true;
list_add_tail(&map->list, &vfs_table);
} else if (accel_dev->is_vf && pf) {
/* VF on host */
struct adf_accel_vf_info *vf_info;
struct vf_id_map *map;
vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev);
map = adf_find_vf(adf_get_vf_num(accel_dev));
if (map) {
struct vf_id_map *next;
accel_dev->accel_id = map->id;
list_add_tail(&accel_dev->list, &accel_table);
map->fake_id++;
map->attached = true;
next = list_next_entry(map, list);
while (next && &next->list != &vfs_table) {
next->fake_id++;
next = list_next_entry(next, list);
}
ret = 0;
goto unlock;
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
ret = -ENOMEM;
goto unlock;
}
accel_dev->accel_id = num_devices++;
list_add_tail(&accel_dev->list, &accel_table);
map->bdf = adf_get_vf_num(accel_dev);
map->id = accel_dev->accel_id;
map->fake_id = map->id;
map->attached = true;
list_add_tail(&map->list, &vfs_table);
} }
atomic_set(&accel_dev->ref_count, 0); unlock:
list_add_tail(&accel_dev->list, &accel_table);
accel_dev->accel_id = num_devices++;
mutex_unlock(&table_lock); mutex_unlock(&table_lock);
return 0; return ret;
} }
EXPORT_SYMBOL_GPL(adf_devmgr_add_dev); EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
...@@ -98,17 +259,37 @@ struct list_head *adf_devmgr_get_head(void) ...@@ -98,17 +259,37 @@ struct list_head *adf_devmgr_get_head(void)
/** /**
* adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework. * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
* @accel_dev: Pointer to acceleration device. * @accel_dev: Pointer to acceleration device.
* @pf: Corresponding PF if the accel_dev is a VF
* *
* Function removes acceleration device from the acceleration framework. * Function removes acceleration device from the acceleration framework.
* To be used by QAT device specific drivers. * To be used by QAT device specific drivers.
* *
* Return: void * Return: void
*/ */
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev) void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf)
{ {
mutex_lock(&table_lock); mutex_lock(&table_lock);
if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
num_devices--;
} else if (accel_dev->is_vf && pf) {
struct vf_id_map *map, *next;
map = adf_find_vf(adf_get_vf_num(accel_dev));
if (!map) {
dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
goto unlock;
}
map->fake_id--;
map->attached = false;
next = list_next_entry(map, list);
while (next && &next->list != &vfs_table) {
next->fake_id--;
next = list_next_entry(next, list);
}
}
unlock:
list_del(&accel_dev->list); list_del(&accel_dev->list);
num_devices--;
mutex_unlock(&table_lock); mutex_unlock(&table_lock);
} }
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev); EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
...@@ -154,17 +335,24 @@ EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev); ...@@ -154,17 +335,24 @@ EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id) struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
{ {
struct list_head *itr; struct list_head *itr;
int real_id;
mutex_lock(&table_lock); mutex_lock(&table_lock);
real_id = adf_get_vf_real_id(id);
if (real_id < 0)
goto unlock;
id = real_id;
list_for_each(itr, &accel_table) { list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr = struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list); list_entry(itr, struct adf_accel_dev, list);
if (ptr->accel_id == id) { if (ptr->accel_id == id) {
mutex_unlock(&table_lock); mutex_unlock(&table_lock);
return ptr; return ptr;
} }
} }
unlock:
mutex_unlock(&table_lock); mutex_unlock(&table_lock);
return NULL; return NULL;
} }
...@@ -180,16 +368,52 @@ int adf_devmgr_verify_id(uint32_t id) ...@@ -180,16 +368,52 @@ int adf_devmgr_verify_id(uint32_t id)
return -ENODEV; return -ENODEV;
} }
static int adf_get_num_dettached_vfs(void)
{
struct list_head *itr;
int vfs = 0;
mutex_lock(&table_lock);
list_for_each(itr, &vfs_table) {
struct vf_id_map *ptr =
list_entry(itr, struct vf_id_map, list);
if (ptr->bdf != ~0 && !ptr->attached)
vfs++;
}
mutex_unlock(&table_lock);
return vfs;
}
void adf_devmgr_get_num_dev(uint32_t *num) void adf_devmgr_get_num_dev(uint32_t *num)
{ {
*num = num_devices; *num = num_devices - adf_get_num_dettached_vfs();
} }
/**
* adf_dev_in_use() - Check whether accel_dev is currently in use
* @accel_dev: Pointer to acceleration device.
*
* To be used by QAT device specific drivers.
*
* Return: 1 when device is in use, 0 otherwise.
*/
int adf_dev_in_use(struct adf_accel_dev *accel_dev) int adf_dev_in_use(struct adf_accel_dev *accel_dev)
{ {
return atomic_read(&accel_dev->ref_count) != 0; return atomic_read(&accel_dev->ref_count) != 0;
} }
EXPORT_SYMBOL_GPL(adf_dev_in_use);
/**
* adf_dev_get() - Increment accel_dev reference count
* @accel_dev: Pointer to acceleration device.
*
* Increment the accel_dev refcount and if this is the first time
* incrementing it during this period the accel_dev is in use,
* increment the module refcount too.
* To be used by QAT device specific drivers.
*
* Return: 0 when successful, EFAULT when fail to bump module refcount
*/
int adf_dev_get(struct adf_accel_dev *accel_dev) int adf_dev_get(struct adf_accel_dev *accel_dev)
{ {
if (atomic_add_return(1, &accel_dev->ref_count) == 1) if (atomic_add_return(1, &accel_dev->ref_count) == 1)
...@@ -197,19 +421,50 @@ int adf_dev_get(struct adf_accel_dev *accel_dev) ...@@ -197,19 +421,50 @@ int adf_dev_get(struct adf_accel_dev *accel_dev)
return -EFAULT; return -EFAULT;
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(adf_dev_get);
/**
* adf_dev_put() - Decrement accel_dev reference count
* @accel_dev: Pointer to acceleration device.
*
* Decrement the accel_dev refcount and if this is the last time
* decrementing it during this period the accel_dev is in use,
* decrement the module refcount too.
* To be used by QAT device specific drivers.
*
* Return: void
*/
void adf_dev_put(struct adf_accel_dev *accel_dev) void adf_dev_put(struct adf_accel_dev *accel_dev)
{ {
if (atomic_sub_return(1, &accel_dev->ref_count) == 0) if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
module_put(accel_dev->owner); module_put(accel_dev->owner);
} }
EXPORT_SYMBOL_GPL(adf_dev_put);
/**
* adf_devmgr_in_reset() - Check whether device is in reset
* @accel_dev: Pointer to acceleration device.
*
* To be used by QAT device specific drivers.
*
* Return: 1 when the device is being reset, 0 otherwise.
*/
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev) int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{ {
return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status); return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
} }
EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
/**
* adf_dev_started() - Check whether device has started
* @accel_dev: Pointer to acceleration device.
*
* To be used by QAT device specific drivers.
*
* Return: 1 when the device has started, 0 otherwise
*/
int adf_dev_started(struct adf_accel_dev *accel_dev) int adf_dev_started(struct adf_accel_dev *accel_dev)
{ {
return test_bit(ADF_STATUS_STARTED, &accel_dev->status); return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
} }
EXPORT_SYMBOL_GPL(adf_dev_started);
...@@ -187,6 +187,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev) ...@@ -187,6 +187,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
} }
hw_data->enable_error_correction(accel_dev); hw_data->enable_error_correction(accel_dev);
hw_data->enable_vf2pf_comms(accel_dev);
return 0; return 0;
} }
...@@ -235,7 +236,8 @@ int adf_dev_start(struct adf_accel_dev *accel_dev) ...@@ -235,7 +236,8 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTING, &accel_dev->status); clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
set_bit(ADF_STATUS_STARTED, &accel_dev->status); set_bit(ADF_STATUS_STARTED, &accel_dev->status);
if (qat_algs_register() || qat_asym_algs_register()) { if (!list_empty(&accel_dev->crypto_list) &&
(qat_algs_register() || qat_asym_algs_register())) {
dev_err(&GET_DEV(accel_dev), dev_err(&GET_DEV(accel_dev),
"Failed to register crypto algs\n"); "Failed to register crypto algs\n");
set_bit(ADF_STATUS_STARTING, &accel_dev->status); set_bit(ADF_STATUS_STARTING, &accel_dev->status);
...@@ -270,11 +272,12 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev) ...@@ -270,11 +272,12 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTING, &accel_dev->status); clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status); clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
if (qat_algs_unregister()) if (!list_empty(&accel_dev->crypto_list) && qat_algs_unregister())
dev_err(&GET_DEV(accel_dev), dev_err(&GET_DEV(accel_dev),
"Failed to unregister crypto algs\n"); "Failed to unregister crypto algs\n");
qat_asym_algs_unregister(); if (!list_empty(&accel_dev->crypto_list))
qat_asym_algs_unregister();
list_for_each(list_itr, &service_table) { list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list); service = list_entry(list_itr, struct service_hndl, list);
...@@ -363,6 +366,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev) ...@@ -363,6 +366,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
if (hw_data->exit_admin_comms) if (hw_data->exit_admin_comms)
hw_data->exit_admin_comms(accel_dev); hw_data->exit_admin_comms(accel_dev);
hw_data->disable_iov(accel_dev);
adf_cleanup_etr_data(accel_dev); adf_cleanup_etr_data(accel_dev);
} }
EXPORT_SYMBOL_GPL(adf_dev_shutdown); EXPORT_SYMBOL_GPL(adf_dev_shutdown);
......
This diff is collapsed.
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Contact Information:
qat-linux@intel.com
BSD LICENSE
Copyright(c) 2015 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ADF_PF2VF_MSG_H
#define ADF_PF2VF_MSG_H
/*
* PF<->VF Messaging
* The PF has an array of 32-bit PF2VF registers, one for each VF. The
* PF can access all these registers; each VF can access only the one
* register associated with that particular VF.
*
* The register functionally is split into two parts:
* The bottom half is for PF->VF messages. In particular when the first
* bit of this register (bit 0) gets set an interrupt will be triggered
* in the respective VF.
* The top half is for VF->PF messages. In particular when the first bit
* of this half of register (bit 16) gets set an interrupt will be triggered
* in the PF.
*
* The remaining bits within this register are available to encode messages.
* and implement a collision control mechanism to prevent concurrent use of
* the PF2VF register by both the PF and VF.
*
* 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
* _______________________________________________
* | | | | | | | | | | | | | | | | |
* +-----------------------------------------------+
* \___________________________/ \_________/ ^ ^
* ^ ^ | |
* | | | VF2PF Int
* | | Message Origin
* | Message Type
* Message-specific Data/Reserved
*
* 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
* _______________________________________________
* | | | | | | | | | | | | | | | | |
* +-----------------------------------------------+
* \___________________________/ \_________/ ^ ^
* ^ ^ | |
* | | | PF2VF Int
* | | Message Origin
* | Message Type
* Message-specific Data/Reserved
*
* Message Origin (Should always be 1)
* A legacy out-of-tree QAT driver allowed for a set of messages not supported
* by this driver; these had a Msg Origin of 0 and are ignored by this driver.
*
* When a PF or VF attempts to send a message in the lower or upper 16 bits,
* respectively, the other 16 bits are written to first with a defined
* IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
*/
#define ADF_PFVF_COMPATIBILITY_VERSION 0x1	/* PF<->VF compat */
/* PF->VF messages: the PF writes its message into the lower 16 bits */
#define ADF_PF2VF_INT BIT(0)
#define ADF_PF2VF_MSGORIGIN_SYSTEM BIT(1)
#define ADF_PF2VF_MSGTYPE_MASK 0x0000003C	/* message type in bits 5:2 */
#define ADF_PF2VF_MSGTYPE_SHIFT 2
#define ADF_PF2VF_MSGTYPE_RESTARTING 0x01
#define ADF_PF2VF_MSGTYPE_VERSION_RESP 0x02
/* Pattern the PF writes into the VF half while composing its own message
 * (collision control scheme described in the header comment above) */
#define ADF_PF2VF_IN_USE_BY_PF 0x6AC20000
#define ADF_PF2VF_IN_USE_BY_PF_MASK 0xFFFE0000
/* PF->VF Version Response */
#define ADF_PF2VF_VERSION_RESP_VERS_MASK 0x00003FC0	/* version in bits 13:6 */
#define ADF_PF2VF_VERSION_RESP_VERS_SHIFT 6
#define ADF_PF2VF_VERSION_RESP_RESULT_MASK 0x0000C000	/* result in bits 15:14 */
#define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT 14
#define ADF_PF2VF_VF_COMPATIBLE 1
#define ADF_PF2VF_VF_INCOMPATIBLE 2
#define ADF_PF2VF_VF_COMPAT_UNKNOWN 3
/* VF->PF messages: the VF writes its message into the upper 16 bits */
#define ADF_VF2PF_IN_USE_BY_VF 0x00006AC2	/* in-use pattern, PF half */
#define ADF_VF2PF_IN_USE_BY_VF_MASK 0x0000FFFE
#define ADF_VF2PF_INT BIT(16)
#define ADF_VF2PF_MSGORIGIN_SYSTEM BIT(17)
#define ADF_VF2PF_MSGTYPE_MASK 0x003C0000	/* message type in bits 21:18 */
#define ADF_VF2PF_MSGTYPE_SHIFT 18
#define ADF_VF2PF_MSGTYPE_INIT 0x3
#define ADF_VF2PF_MSGTYPE_SHUTDOWN 0x4
#define ADF_VF2PF_MSGTYPE_VERSION_REQ 0x5
#define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ 0x6
/* VF->PF Compatible Version Request */
#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT 22
/* Collision detection retry/delay tuning
 * NOTE(review): units are presumably milliseconds -- confirm against the
 * sleep/delay calls in adf_iov_putmsg */
#define ADF_IOV_MSG_COLLISION_DETECT_DELAY 10
#define ADF_IOV_MSG_ACK_DELAY 2
#define ADF_IOV_MSG_ACK_MAX_RETRY 100
#define ADF_IOV_MSG_RETRY_DELAY 5
#define ADF_IOV_MSG_MAX_RETRIES 3
/* Worst-case wait for a response: full ACK polling window plus one
 * collision back-off */
#define ADF_IOV_MSG_RESP_TIMEOUT (ADF_IOV_MSG_ACK_DELAY * \
				  ADF_IOV_MSG_ACK_MAX_RETRY + \
				  ADF_IOV_MSG_COLLISION_DETECT_DELAY)
#endif /* ADF_IOV_MSG_H */
This diff is collapsed.
...@@ -103,9 +103,11 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) ...@@ -103,9 +103,11 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
list_for_each(itr, adf_devmgr_get_head()) { list_for_each(itr, adf_devmgr_get_head()) {
accel_dev = list_entry(itr, struct adf_accel_dev, list); accel_dev = list_entry(itr, struct adf_accel_dev, list);
if ((node == dev_to_node(&GET_DEV(accel_dev)) || if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
dev_to_node(&GET_DEV(accel_dev)) < 0) && dev_to_node(&GET_DEV(accel_dev)) < 0) &&
adf_dev_started(accel_dev)) adf_dev_started(accel_dev) &&
!list_empty(&accel_dev->crypto_list))
break; break;
accel_dev = NULL; accel_dev = NULL;
} }
......
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#include <adf_accel_devices.h> #include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h> #include <adf_common_drv.h>
#include "adf_dh895xcc_hw_data.h" #include "adf_dh895xcc_hw_data.h"
#include "adf_drv.h" #include "adf_drv.h"
...@@ -161,6 +162,16 @@ void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, ...@@ -161,6 +162,16 @@ void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
} }
} }
/* Return the CSR offset of the PF2VF doorbell/message register for VF @i
 * on dh895xcc (PMISC region, base 0x3A280, one 32-bit register per VF). */
static uint32_t get_pf2vf_offset(uint32_t i)
{
	return ADF_DH895XCC_PF2VF_OFFSET(i);
}
/* Return the CSR offset of the VF interrupt mask register for VF @i
 * on dh895xcc (PMISC region, base 0x3A200, one 32-bit register per VF). */
static uint32_t get_vintmsk_offset(uint32_t i)
{
	return ADF_DH895XCC_VINTMSK_OFFSET(i);
}
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{ {
struct adf_hw_device_data *hw_device = accel_dev->hw_device; struct adf_hw_device_data *hw_device = accel_dev->hw_device;
...@@ -197,11 +208,17 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev) ...@@ -197,11 +208,17 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
/* Enable bundle and misc interrupts */ /* Enable bundle and misc interrupts */
ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET, ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
ADF_DH895XCC_SMIA0_MASK); accel_dev->pf.vf_info ? 0 :
GENMASK_ULL(GET_MAX_BANKS(accel_dev) - 1, 0));
ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET, ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
ADF_DH895XCC_SMIA1_MASK); ADF_DH895XCC_SMIA1_MASK);
} }
/*
 * enable_vf2pf_comms hook for the PF on dh895xcc.
 * Intentionally a no-op: no PF-side setup is performed here, so it
 * always reports success.
 */
static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	return 0;
}
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{ {
hw_data->dev_class = &dh895xcc_class; hw_data->dev_class = &dh895xcc_class;
...@@ -221,17 +238,22 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) ...@@ -221,17 +238,22 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->get_num_aes = get_num_aes; hw_data->get_num_aes = get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id; hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id; hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_vintmsk_offset = get_vintmsk_offset;
hw_data->get_sram_bar_id = get_sram_bar_id; hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_sku = get_sku; hw_data->get_sku = get_sku;
hw_data->fw_name = ADF_DH895XCC_FW; hw_data->fw_name = ADF_DH895XCC_FW;
hw_data->fw_mmp_name = ADF_DH895XCC_MMP; hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
hw_data->init_admin_comms = adf_init_admin_comms; hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms; hw_data->exit_admin_comms = adf_exit_admin_comms;
hw_data->disable_iov = adf_disable_sriov;
hw_data->send_admin_init = adf_send_admin_init; hw_data->send_admin_init = adf_send_admin_init;
hw_data->init_arb = adf_init_arb; hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb; hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping; hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints; hw_data->enable_ints = adf_enable_ints;
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
} }
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
......
...@@ -80,6 +80,10 @@ ...@@ -80,6 +80,10 @@
#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10) #define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
#define ADF_DH895XCC_ERRSSMSH_EN BIT(3) #define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
#define ADF_DH895XCC_ERRSOU3 (0x3A000 + 0x00C)
#define ADF_DH895XCC_ERRSOU5 (0x3A000 + 0x0D8)
#define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
#define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
/* FW names */ /* FW names */
#define ADF_DH895XCC_FW "qat_895xcc.bin" #define ADF_DH895XCC_FW "qat_895xcc.bin"
#define ADF_DH895XCC_MMP "qat_mmp.bin" #define ADF_DH895XCC_MMP "qat_mmp.bin"
......
...@@ -82,16 +82,21 @@ static struct pci_driver adf_driver = { ...@@ -82,16 +82,21 @@ static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl, .id_table = adf_pci_tbl,
.name = adf_driver_name, .name = adf_driver_name,
.probe = adf_probe, .probe = adf_probe,
.remove = adf_remove .remove = adf_remove,
.sriov_configure = adf_sriov_configure,
}; };
/*
 * Undo the PCI-core setup done at probe time: release the BAR regions
 * claimed via pci_request_regions() and disable the PCI device.
 */
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{ {
struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
int i; int i;
adf_dev_shutdown(accel_dev);
for (i = 0; i < ADF_PCI_MAX_BARS; i++) { for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
...@@ -108,13 +113,11 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) ...@@ -108,13 +113,11 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
break; break;
} }
kfree(accel_dev->hw_device); kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
} }
adf_cfg_dev_remove(accel_dev); adf_cfg_dev_remove(accel_dev);
debugfs_remove(accel_dev->debugfs_dir); debugfs_remove(accel_dev->debugfs_dir);
adf_devmgr_rm_dev(accel_dev); adf_devmgr_rm_dev(accel_dev, NULL);
pci_release_regions(accel_pci_dev->pci_dev);
pci_disable_device(accel_pci_dev->pci_dev);
kfree(accel_dev);
} }
static int adf_dev_configure(struct adf_accel_dev *accel_dev) static int adf_dev_configure(struct adf_accel_dev *accel_dev)
...@@ -205,7 +208,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -205,7 +208,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data; struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH]; char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr; unsigned int i, bar_nr;
int ret; int ret, bar_mask;
switch (ent->device) { switch (ent->device) {
case ADF_DH895XCC_PCI_DEVICE_ID: case ADF_DH895XCC_PCI_DEVICE_ID:
...@@ -229,10 +232,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -229,10 +232,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM; return -ENOMEM;
INIT_LIST_HEAD(&accel_dev->crypto_list); INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = pdev;
/* Add accel device to accel table. /* Add accel device to accel table.
* This should be called before adf_cleanup_accel is called */ * This should be called before adf_cleanup_accel is called */
if (adf_devmgr_add_dev(accel_dev)) { if (adf_devmgr_add_dev(accel_dev, NULL)) {
dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
kfree(accel_dev); kfree(accel_dev);
return -EFAULT; return -EFAULT;
...@@ -255,7 +260,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -255,7 +260,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
default: default:
return -ENODEV; return -ENODEV;
} }
accel_pci_dev = &accel_dev->accel_pci_dev;
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET, pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
&hw_data->fuses); &hw_data->fuses);
...@@ -264,7 +268,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -264,7 +268,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
accel_pci_dev->sku = hw_data->get_sku(hw_data); accel_pci_dev->sku = hw_data->get_sku(hw_data);
accel_pci_dev->pci_dev = pdev;
/* If the device has no acceleration engines then ignore it. */ /* If the device has no acceleration engines then ignore it. */
if (!hw_data->accel_mask || !hw_data->ae_mask || if (!hw_data->accel_mask || !hw_data->ae_mask ||
((~hw_data->ae_mask) & 0x01)) { ((~hw_data->ae_mask) & 0x01)) {
...@@ -274,11 +277,14 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -274,11 +277,14 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} }
/* Create dev top level debugfs entry */ /* Create dev top level debugfs entry */
snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX, snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
hw_data->dev_class->name, hw_data->instance_id); ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
pdev->bus->number, PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
if (!accel_dev->debugfs_dir) { if (!accel_dev->debugfs_dir) {
dev_err(&pdev->dev, "Could not create debugfs dir\n"); dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
ret = -EINVAL; ret = -EINVAL;
goto out_err; goto out_err;
} }
...@@ -301,7 +307,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -301,7 +307,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
dev_err(&pdev->dev, "No usable DMA configuration\n"); dev_err(&pdev->dev, "No usable DMA configuration\n");
ret = -EFAULT; ret = -EFAULT;
goto out_err; goto out_err_disable;
} else { } else {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
} }
...@@ -312,7 +318,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -312,7 +318,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_request_regions(pdev, adf_driver_name)) { if (pci_request_regions(pdev, adf_driver_name)) {
ret = -EFAULT; ret = -EFAULT;
goto out_err; goto out_err_disable;
} }
/* Read accelerator capabilities mask */ /* Read accelerator capabilities mask */
...@@ -320,19 +326,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -320,19 +326,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
&hw_data->accel_capabilities_mask); &hw_data->accel_capabilities_mask);
/* Find and map all the device's BARS */ /* Find and map all the device's BARS */
for (i = 0; i < ADF_PCI_MAX_BARS; i++) { i = 0;
struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar_nr = i * 2;
bar->base_addr = pci_resource_start(pdev, bar_nr); bar->base_addr = pci_resource_start(pdev, bar_nr);
if (!bar->base_addr) if (!bar->base_addr)
break; break;
bar->size = pci_resource_len(pdev, bar_nr); bar->size = pci_resource_len(pdev, bar_nr);
bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
if (!bar->virt_addr) { if (!bar->virt_addr) {
dev_err(&pdev->dev, "Failed to map BAR %d\n", i); dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
ret = -EFAULT; ret = -EFAULT;
goto out_err; goto out_err_free_reg;
} }
} }
pci_set_master(pdev); pci_set_master(pdev);
...@@ -340,32 +348,40 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -340,32 +348,40 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (adf_enable_aer(accel_dev, &adf_driver)) { if (adf_enable_aer(accel_dev, &adf_driver)) {
dev_err(&pdev->dev, "Failed to enable aer\n"); dev_err(&pdev->dev, "Failed to enable aer\n");
ret = -EFAULT; ret = -EFAULT;
goto out_err; goto out_err_free_reg;
} }
if (pci_save_state(pdev)) { if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n"); dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM; ret = -ENOMEM;
goto out_err; goto out_err_free_reg;
} }
ret = adf_dev_configure(accel_dev); ret = adf_dev_configure(accel_dev);
if (ret) if (ret)
goto out_err; goto out_err_free_reg;
ret = adf_dev_init(accel_dev); ret = adf_dev_init(accel_dev);
if (ret) if (ret)
goto out_err; goto out_err_dev_shutdown;
ret = adf_dev_start(accel_dev); ret = adf_dev_start(accel_dev);
if (ret) { if (ret)
adf_dev_stop(accel_dev); goto out_err_dev_stop;
goto out_err;
}
return 0; return ret;
out_err_dev_stop:
adf_dev_stop(accel_dev);
out_err_dev_shutdown:
adf_dev_shutdown(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
pci_disable_device(accel_pci_dev->pci_dev);
out_err: out_err:
adf_cleanup_accel(accel_dev); adf_cleanup_accel(accel_dev);
kfree(accel_dev);
return ret; return ret;
} }
...@@ -379,8 +395,12 @@ static void adf_remove(struct pci_dev *pdev) ...@@ -379,8 +395,12 @@ static void adf_remove(struct pci_dev *pdev)
} }
if (adf_dev_stop(accel_dev)) if (adf_dev_stop(accel_dev))
dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
adf_dev_shutdown(accel_dev);
adf_disable_aer(accel_dev); adf_disable_aer(accel_dev);
adf_cleanup_accel(accel_dev); adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
} }
static int __init adfdrv_init(void) static int __init adfdrv_init(void)
......
...@@ -59,21 +59,30 @@ ...@@ -59,21 +59,30 @@
#include <adf_transport_access_macros.h> #include <adf_transport_access_macros.h>
#include <adf_transport_internal.h> #include <adf_transport_internal.h>
#include "adf_drv.h" #include "adf_drv.h"
#include "adf_dh895xcc_hw_data.h"
static int adf_enable_msix(struct adf_accel_dev *accel_dev) static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{ {
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_hw_device_data *hw_data = accel_dev->hw_device;
uint32_t msix_num_entries = hw_data->num_banks + 1; u32 msix_num_entries = 1;
int i;
/* If SR-IOV is disabled, add entries for each bank */
for (i = 0; i < msix_num_entries; i++) if (!accel_dev->pf.vf_info) {
pci_dev_info->msix_entries.entries[i].entry = i; int i;
msix_num_entries += hw_data->num_banks;
for (i = 0; i < msix_num_entries; i++)
pci_dev_info->msix_entries.entries[i].entry = i;
} else {
pci_dev_info->msix_entries.entries[0].entry =
hw_data->num_banks;
}
if (pci_enable_msix_exact(pci_dev_info->pci_dev, if (pci_enable_msix_exact(pci_dev_info->pci_dev,
pci_dev_info->msix_entries.entries, pci_dev_info->msix_entries.entries,
msix_num_entries)) { msix_num_entries)) {
dev_err(&GET_DEV(accel_dev), "Failed to enable MSIX IRQ\n"); dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
return -EFAULT; return -EFAULT;
} }
return 0; return 0;
...@@ -97,9 +106,58 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) ...@@ -97,9 +106,58 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{ {
struct adf_accel_dev *accel_dev = dev_ptr; struct adf_accel_dev *accel_dev = dev_ptr;
dev_info(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n", #ifdef CONFIG_PCI_IOV
accel_dev->accel_id); /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
return IRQ_HANDLED; if (accel_dev->pf.vf_info) {
void __iomem *pmisc_bar_addr =
(&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
u32 vf_mask;
/* Get the interrupt sources triggered by VFs */
vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU5) &
0x0000FFFF) << 16) |
((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU3) &
0x01FFFE00) >> 9);
if (vf_mask) {
struct adf_accel_vf_info *vf_info;
bool irq_handled = false;
int i;
/* Disable VF2PF interrupts for VFs with pending ints */
adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
/*
* Schedule tasklets to handle VF2PF interrupt BHs
* unless the VF is malicious and is attempting to
* flood the host OS with VF2PF interrupts.
*/
for_each_set_bit(i, (const unsigned long *)&vf_mask,
(sizeof(vf_mask) * BITS_PER_BYTE)) {
vf_info = accel_dev->pf.vf_info + i;
if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
dev_info(&GET_DEV(accel_dev),
"Too many ints from VF%d\n",
vf_info->vf_nr + 1);
continue;
}
/* Tasklet will re-enable ints from this VF */
tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
irq_handled = true;
}
if (irq_handled)
return IRQ_HANDLED;
}
}
#endif /* CONFIG_PCI_IOV */
dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
accel_dev->accel_id);
return IRQ_NONE;
} }
static int adf_request_irqs(struct adf_accel_dev *accel_dev) static int adf_request_irqs(struct adf_accel_dev *accel_dev)
...@@ -108,28 +166,32 @@ static int adf_request_irqs(struct adf_accel_dev *accel_dev) ...@@ -108,28 +166,32 @@ static int adf_request_irqs(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct msix_entry *msixe = pci_dev_info->msix_entries.entries; struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
struct adf_etr_data *etr_data = accel_dev->transport; struct adf_etr_data *etr_data = accel_dev->transport;
int ret, i; int ret, i = 0;
char *name; char *name;
/* Request msix irq for all banks */ /* Request msix irq for all banks unless SR-IOV enabled */
for (i = 0; i < hw_data->num_banks; i++) { if (!accel_dev->pf.vf_info) {
struct adf_etr_bank_data *bank = &etr_data->banks[i]; for (i = 0; i < hw_data->num_banks; i++) {
unsigned int cpu, cpus = num_online_cpus(); struct adf_etr_bank_data *bank = &etr_data->banks[i];
unsigned int cpu, cpus = num_online_cpus();
name = *(pci_dev_info->msix_entries.names + i);
snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, name = *(pci_dev_info->msix_entries.names + i);
"qat%d-bundle%d", accel_dev->accel_id, i); snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
ret = request_irq(msixe[i].vector, "qat%d-bundle%d", accel_dev->accel_id, i);
adf_msix_isr_bundle, 0, name, bank); ret = request_irq(msixe[i].vector,
if (ret) { adf_msix_isr_bundle, 0, name, bank);
dev_err(&GET_DEV(accel_dev), if (ret) {
"failed to enable irq %d for %s\n", dev_err(&GET_DEV(accel_dev),
msixe[i].vector, name); "failed to enable irq %d for %s\n",
return ret; msixe[i].vector, name);
return ret;
}
cpu = ((accel_dev->accel_id * hw_data->num_banks) +
i) % cpus;
irq_set_affinity_hint(msixe[i].vector,
get_cpu_mask(cpu));
} }
cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus;
irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu));
} }
/* Request msix irq for AE */ /* Request msix irq for AE */
...@@ -152,11 +214,13 @@ static void adf_free_irqs(struct adf_accel_dev *accel_dev) ...@@ -152,11 +214,13 @@ static void adf_free_irqs(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct msix_entry *msixe = pci_dev_info->msix_entries.entries; struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
struct adf_etr_data *etr_data = accel_dev->transport; struct adf_etr_data *etr_data = accel_dev->transport;
int i; int i = 0;
for (i = 0; i < hw_data->num_banks; i++) { if (pci_dev_info->msix_entries.num_entries > 1) {
irq_set_affinity_hint(msixe[i].vector, NULL); for (i = 0; i < hw_data->num_banks; i++) {
free_irq(msixe[i].vector, &etr_data->banks[i]); irq_set_affinity_hint(msixe[i].vector, NULL);
free_irq(msixe[i].vector, &etr_data->banks[i]);
}
} }
irq_set_affinity_hint(msixe[i].vector, NULL); irq_set_affinity_hint(msixe[i].vector, NULL);
free_irq(msixe[i].vector, accel_dev); free_irq(msixe[i].vector, accel_dev);
...@@ -168,7 +232,11 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) ...@@ -168,7 +232,11 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
char **names; char **names;
struct msix_entry *entries; struct msix_entry *entries;
struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_hw_device_data *hw_data = accel_dev->hw_device;
uint32_t msix_num_entries = hw_data->num_banks + 1; u32 msix_num_entries = 1;
/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
if (!accel_dev->pf.vf_info)
msix_num_entries += hw_data->num_banks;
entries = kzalloc_node(msix_num_entries * sizeof(*entries), entries = kzalloc_node(msix_num_entries * sizeof(*entries),
GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
...@@ -185,6 +253,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) ...@@ -185,6 +253,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
if (!(*(names + i))) if (!(*(names + i)))
goto err; goto err;
} }
accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
accel_dev->accel_pci_dev.msix_entries.entries = entries; accel_dev->accel_pci_dev.msix_entries.entries = entries;
accel_dev->accel_pci_dev.msix_entries.names = names; accel_dev->accel_pci_dev.msix_entries.names = names;
return 0; return 0;
...@@ -198,13 +267,11 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) ...@@ -198,13 +267,11 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev) static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
{ {
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
uint32_t msix_num_entries = hw_data->num_banks + 1;
char **names = accel_dev->accel_pci_dev.msix_entries.names; char **names = accel_dev->accel_pci_dev.msix_entries.names;
int i; int i;
kfree(accel_dev->accel_pci_dev.msix_entries.entries); kfree(accel_dev->accel_pci_dev.msix_entries.entries);
for (i = 0; i < msix_num_entries; i++) for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
kfree(*(names + i)); kfree(*(names + i));
kfree(names); kfree(names);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment