Commit 0a6cb34f authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 - stack corruption fix for pseries hwrng driver
 - add missing DMA unmap in caam crypto driver
 - fix NUMA crash in qat crypto driver
 - fix buggy mapping of zero-length associated data in qat crypto driver

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  hwrng: pseries - port to new read API and fix stack corruption
  crypto: caam - fix missing dma unmap on error path
  crypto: qat - Enforce valid numa configuration
  crypto: qat - Prevent dma mapping zero length assoc data
parents 15e5cda9 24c65bc7
...@@ -25,18 +25,21 @@ ...@@ -25,18 +25,21 @@
#include <asm/vio.h> #include <asm/vio.h>
static int pseries_rng_data_read(struct hwrng *rng, u32 *data) static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{ {
u64 buffer[PLPAR_HCALL_BUFSIZE];
size_t size = max < 8 ? max : 8;
int rc; int rc;
rc = plpar_hcall(H_RANDOM, (unsigned long *)data); rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
if (rc != H_SUCCESS) { if (rc != H_SUCCESS) {
pr_err_ratelimited("H_RANDOM call failed %d\n", rc); pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
return -EIO; return -EIO;
} }
memcpy(data, buffer, size);
/* The hypervisor interface returns 64 bits */ /* The hypervisor interface returns 64 bits */
return 8; return size;
} }
/** /**
...@@ -55,7 +58,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev) ...@@ -55,7 +58,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
static struct hwrng pseries_rng = { static struct hwrng pseries_rng = {
.name = KBUILD_MODNAME, .name = KBUILD_MODNAME,
.data_read = pseries_rng_data_read, .read = pseries_rng_read,
}; };
static int __init pseries_rng_probe(struct vio_dev *dev, static int __init pseries_rng_probe(struct vio_dev *dev,
......
...@@ -48,23 +48,29 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, ...@@ -48,23 +48,29 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
u32 *desc; u32 *desc;
struct split_key_result result; struct split_key_result result;
dma_addr_t dma_addr_in, dma_addr_out; dma_addr_t dma_addr_in, dma_addr_out;
int ret = 0; int ret = -ENOMEM;
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) { if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n"); dev_err(jrdev, "unable to allocate key input memory\n");
return -ENOMEM; return ret;
} }
init_job_desc(desc, 0);
dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen, dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, dma_addr_in)) { if (dma_mapping_error(jrdev, dma_addr_in)) {
dev_err(jrdev, "unable to map key input memory\n"); dev_err(jrdev, "unable to map key input memory\n");
kfree(desc); goto out_free;
return -ENOMEM;
} }
dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, dma_addr_out)) {
dev_err(jrdev, "unable to map key output memory\n");
goto out_unmap_in;
}
init_job_desc(desc, 0);
append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
/* Sets MDHA up into an HMAC-INIT */ /* Sets MDHA up into an HMAC-INIT */
...@@ -81,13 +87,6 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, ...@@ -81,13 +87,6 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
* FIFO_STORE with the explicit split-key content store * FIFO_STORE with the explicit split-key content store
* (0x26 output type) * (0x26 output type)
*/ */
dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, dma_addr_out)) {
dev_err(jrdev, "unable to map key output memory\n");
kfree(desc);
return -ENOMEM;
}
append_fifo_store(desc, dma_addr_out, split_key_len, append_fifo_store(desc, dma_addr_out, split_key_len,
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
...@@ -115,10 +114,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, ...@@ -115,10 +114,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len, dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
out_unmap_in:
dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE); dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
out_free:
kfree(desc); kfree(desc);
return ret; return ret;
} }
EXPORT_SYMBOL(gen_split_key); EXPORT_SYMBOL(gen_split_key);
...@@ -198,8 +198,7 @@ struct adf_accel_dev { ...@@ -198,8 +198,7 @@ struct adf_accel_dev {
struct dentry *debugfs_dir; struct dentry *debugfs_dir;
struct list_head list; struct list_head list;
struct module *owner; struct module *owner;
uint8_t accel_id;
uint8_t numa_node;
struct adf_accel_pci accel_pci_dev; struct adf_accel_pci accel_pci_dev;
uint8_t accel_id;
} __packed; } __packed;
#endif #endif
...@@ -419,9 +419,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev, ...@@ -419,9 +419,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0); WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
ring = &bank->rings[i]; ring = &bank->rings[i];
if (hw_data->tx_rings_mask & (1 << i)) { if (hw_data->tx_rings_mask & (1 << i)) {
ring->inflights = kzalloc_node(sizeof(atomic_t), ring->inflights =
GFP_KERNEL, kzalloc_node(sizeof(atomic_t),
accel_dev->numa_node); GFP_KERNEL,
dev_to_node(&GET_DEV(accel_dev)));
if (!ring->inflights) if (!ring->inflights)
goto err; goto err;
} else { } else {
...@@ -469,13 +470,14 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev) ...@@ -469,13 +470,14 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
int i, ret; int i, ret;
etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL, etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
accel_dev->numa_node); dev_to_node(&GET_DEV(accel_dev)));
if (!etr_data) if (!etr_data)
return -ENOMEM; return -ENOMEM;
num_banks = GET_MAX_BANKS(accel_dev); num_banks = GET_MAX_BANKS(accel_dev);
size = num_banks * sizeof(struct adf_etr_bank_data); size = num_banks * sizeof(struct adf_etr_bank_data);
etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node); etr_data->banks = kzalloc_node(size, GFP_KERNEL,
dev_to_node(&GET_DEV(accel_dev)));
if (!etr_data->banks) { if (!etr_data->banks) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_bank; goto err_bank;
......
...@@ -596,7 +596,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, ...@@ -596,7 +596,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (unlikely(!n)) if (unlikely(!n))
return -EINVAL; return -EINVAL;
bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node); bufl = kmalloc_node(sz, GFP_ATOMIC,
dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!bufl)) if (unlikely(!bufl))
return -ENOMEM; return -ENOMEM;
...@@ -605,6 +606,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, ...@@ -605,6 +606,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
goto err; goto err;
for_each_sg(assoc, sg, assoc_n, i) { for_each_sg(assoc, sg, assoc_n, i) {
if (!sg->length)
continue;
bufl->bufers[bufs].addr = dma_map_single(dev, bufl->bufers[bufs].addr = dma_map_single(dev,
sg_virt(sg), sg_virt(sg),
sg->length, sg->length,
...@@ -640,7 +643,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, ...@@ -640,7 +643,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct qat_alg_buf *bufers; struct qat_alg_buf *bufers;
buflout = kmalloc_node(sz, GFP_ATOMIC, buflout = kmalloc_node(sz, GFP_ATOMIC,
inst->accel_dev->numa_node); dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!buflout)) if (unlikely(!buflout))
goto err; goto err;
bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE); bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
......
...@@ -109,12 +109,14 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) ...@@ -109,12 +109,14 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
list_for_each(itr, adf_devmgr_get_head()) { list_for_each(itr, adf_devmgr_get_head()) {
accel_dev = list_entry(itr, struct adf_accel_dev, list); accel_dev = list_entry(itr, struct adf_accel_dev, list);
if (accel_dev->numa_node == node && adf_dev_started(accel_dev)) if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
dev_to_node(&GET_DEV(accel_dev)) < 0)
&& adf_dev_started(accel_dev))
break; break;
accel_dev = NULL; accel_dev = NULL;
} }
if (!accel_dev) { if (!accel_dev) {
pr_err("QAT: Could not find device on give node\n"); pr_err("QAT: Could not find device on node %d\n", node);
accel_dev = adf_devmgr_get_first(); accel_dev = adf_devmgr_get_first();
} }
if (!accel_dev || !adf_dev_started(accel_dev)) if (!accel_dev || !adf_dev_started(accel_dev))
...@@ -164,7 +166,7 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) ...@@ -164,7 +166,7 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
for (i = 0; i < num_inst; i++) { for (i = 0; i < num_inst; i++) {
inst = kzalloc_node(sizeof(*inst), GFP_KERNEL, inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
accel_dev->numa_node); dev_to_node(&GET_DEV(accel_dev)));
if (!inst) if (!inst)
goto err; goto err;
......
...@@ -108,7 +108,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev) ...@@ -108,7 +108,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
uint64_t reg_val; uint64_t reg_val;
admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
accel_dev->numa_node); dev_to_node(&GET_DEV(accel_dev)));
if (!admin) if (!admin)
return -ENOMEM; return -ENOMEM;
admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
......
...@@ -119,21 +119,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) ...@@ -119,21 +119,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev); kfree(accel_dev);
} }
static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
{
unsigned int bus_per_cpu = 0;
struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);
if (!c->phys_proc_id)
return 0;
bus_per_cpu = 256 / (c->phys_proc_id + 1);
if (bus_per_cpu != 0)
return pdev->bus->number / bus_per_cpu;
return 0;
}
static int qat_dev_start(struct adf_accel_dev *accel_dev) static int qat_dev_start(struct adf_accel_dev *accel_dev)
{ {
int cpus = num_online_cpus(); int cpus = num_online_cpus();
...@@ -235,7 +220,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -235,7 +220,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *pmisc_bar_addr = NULL; void __iomem *pmisc_bar_addr = NULL;
char name[ADF_DEVICE_NAME_LENGTH]; char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr; unsigned int i, bar_nr;
uint8_t node;
int ret; int ret;
switch (ent->device) { switch (ent->device) {
...@@ -246,12 +230,19 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -246,12 +230,19 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV; return -ENODEV;
} }
node = adf_get_dev_node_id(pdev); if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node); /* If the accelerator is connected to a node with no memory
* there is no point in using the accelerator since the remote
* memory transaction will be very slow. */
dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
return -EINVAL;
}
accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!accel_dev) if (!accel_dev)
return -ENOMEM; return -ENOMEM;
accel_dev->numa_node = node;
INIT_LIST_HEAD(&accel_dev->crypto_list); INIT_LIST_HEAD(&accel_dev->crypto_list);
/* Add accel device to accel table. /* Add accel device to accel table.
...@@ -264,7 +255,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -264,7 +255,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
accel_dev->owner = THIS_MODULE; accel_dev->owner = THIS_MODULE;
/* Allocate and configure device configuration structure */ /* Allocate and configure device configuration structure */
hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node); hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!hw_data) { if (!hw_data) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_err; goto out_err;
......
...@@ -168,7 +168,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) ...@@ -168,7 +168,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
uint32_t msix_num_entries = hw_data->num_banks + 1; uint32_t msix_num_entries = hw_data->num_banks + 1;
entries = kzalloc_node(msix_num_entries * sizeof(*entries), entries = kzalloc_node(msix_num_entries * sizeof(*entries),
GFP_KERNEL, accel_dev->numa_node); GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
if (!entries) if (!entries)
return -ENOMEM; return -ENOMEM;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment