Commit 5ee10982 authored by Jorgen Hansen's avatar Jorgen Hansen Committed by Greg Kroah-Hartman

VMCI: dma dg: allocate send and receive buffers for DMA datagrams

If DMA datagrams are used, allocate send and receive buffers
in coherent DMA memory.

This is done in preparation for the send and receive datagram
operations, where the buffers are used for the exchange of data
between driver and device.
Reviewed-by: default avatarVishnu Dasa <vdasa@vmware.com>
Signed-off-by: default avatarJorgen Hansen <jhansen@vmware.com>
Link: https://lore.kernel.org/r/20220207102725.2742-7-jhansen@vmware.comSigned-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent cc68f217
...@@ -31,6 +31,12 @@ ...@@ -31,6 +31,12 @@
#define VMCI_UTIL_NUM_RESOURCES 1 #define VMCI_UTIL_NUM_RESOURCES 1
/*
* Datagram buffers for DMA send/receive must accommodate at least
* a maximum sized datagram and the header.
*/
#define VMCI_DMA_DG_BUFFER_SIZE (VMCI_MAX_DG_SIZE + PAGE_SIZE)
static bool vmci_disable_msi; static bool vmci_disable_msi;
module_param_named(disable_msi, vmci_disable_msi, bool, 0); module_param_named(disable_msi, vmci_disable_msi, bool, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
...@@ -53,6 +59,9 @@ struct vmci_guest_device { ...@@ -53,6 +59,9 @@ struct vmci_guest_device {
struct tasklet_struct bm_tasklet; struct tasklet_struct bm_tasklet;
void *data_buffer; void *data_buffer;
dma_addr_t data_buffer_base;
void *tx_buffer;
dma_addr_t tx_buffer_base;
void *notification_bitmap; void *notification_bitmap;
dma_addr_t notification_base; dma_addr_t notification_base;
}; };
...@@ -451,6 +460,24 @@ static irqreturn_t vmci_interrupt_dma_datagram(int irq, void *_dev) ...@@ -451,6 +460,24 @@ static irqreturn_t vmci_interrupt_dma_datagram(int irq, void *_dev)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
static void vmci_free_dg_buffers(struct vmci_guest_device *vmci_dev)
{
if (vmci_dev->mmio_base != NULL) {
if (vmci_dev->tx_buffer != NULL)
dma_free_coherent(vmci_dev->dev,
VMCI_DMA_DG_BUFFER_SIZE,
vmci_dev->tx_buffer,
vmci_dev->tx_buffer_base);
if (vmci_dev->data_buffer != NULL)
dma_free_coherent(vmci_dev->dev,
VMCI_DMA_DG_BUFFER_SIZE,
vmci_dev->data_buffer,
vmci_dev->data_buffer_base);
} else {
vfree(vmci_dev->data_buffer);
}
}
/* /*
* Most of the initialization at module load time is done here. * Most of the initialization at module load time is done here.
*/ */
...@@ -517,11 +544,27 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, ...@@ -517,11 +544,27 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
tasklet_init(&vmci_dev->bm_tasklet, tasklet_init(&vmci_dev->bm_tasklet,
vmci_process_bitmap, (unsigned long)vmci_dev); vmci_process_bitmap, (unsigned long)vmci_dev);
vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE); if (mmio_base != NULL) {
vmci_dev->tx_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
&vmci_dev->tx_buffer_base,
GFP_KERNEL);
if (!vmci_dev->tx_buffer) {
dev_err(&pdev->dev,
"Can't allocate memory for datagram tx buffer\n");
return -ENOMEM;
}
vmci_dev->data_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
&vmci_dev->data_buffer_base,
GFP_KERNEL);
} else {
vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
}
if (!vmci_dev->data_buffer) { if (!vmci_dev->data_buffer) {
dev_err(&pdev->dev, dev_err(&pdev->dev,
"Can't allocate memory for datagram buffer\n"); "Can't allocate memory for datagram buffer\n");
return -ENOMEM; error = -ENOMEM;
goto err_free_data_buffers;
} }
pci_set_master(pdev); /* To enable queue_pair functionality. */ pci_set_master(pdev); /* To enable queue_pair functionality. */
...@@ -539,7 +582,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, ...@@ -539,7 +582,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
if (!(capabilities & VMCI_CAPS_DATAGRAM)) { if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
dev_err(&pdev->dev, "Device does not support datagrams\n"); dev_err(&pdev->dev, "Device does not support datagrams\n");
error = -ENXIO; error = -ENXIO;
goto err_free_data_buffer; goto err_free_data_buffers;
} }
caps_in_use = VMCI_CAPS_DATAGRAM; caps_in_use = VMCI_CAPS_DATAGRAM;
...@@ -583,7 +626,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, ...@@ -583,7 +626,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
dev_err(&pdev->dev, dev_err(&pdev->dev,
"Missing capability: VMCI_CAPS_DMA_DATAGRAM\n"); "Missing capability: VMCI_CAPS_DMA_DATAGRAM\n");
error = -ENXIO; error = -ENXIO;
goto err_free_data_buffer; goto err_free_data_buffers;
} }
} }
...@@ -592,10 +635,17 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, ...@@ -592,10 +635,17 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
/* Let the host know which capabilities we intend to use. */ /* Let the host know which capabilities we intend to use. */
vmci_write_reg(vmci_dev, caps_in_use, VMCI_CAPS_ADDR); vmci_write_reg(vmci_dev, caps_in_use, VMCI_CAPS_ADDR);
/* Let the device know the size for pages passed down. */ if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) {
if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) /* Let the device know the size for pages passed down. */
vmci_write_reg(vmci_dev, PAGE_SHIFT, VMCI_GUEST_PAGE_SHIFT); vmci_write_reg(vmci_dev, PAGE_SHIFT, VMCI_GUEST_PAGE_SHIFT);
/* Configure the high order parts of the data in/out buffers. */
vmci_write_reg(vmci_dev, upper_32_bits(vmci_dev->data_buffer_base),
VMCI_DATA_IN_HIGH_ADDR);
vmci_write_reg(vmci_dev, upper_32_bits(vmci_dev->tx_buffer_base),
VMCI_DATA_OUT_HIGH_ADDR);
}
/* Set up global device so that we can start sending datagrams */ /* Set up global device so that we can start sending datagrams */
spin_lock_irq(&vmci_dev_spinlock); spin_lock_irq(&vmci_dev_spinlock);
vmci_dev_g = vmci_dev; vmci_dev_g = vmci_dev;
...@@ -747,8 +797,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, ...@@ -747,8 +797,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
vmci_dev_g = NULL; vmci_dev_g = NULL;
spin_unlock_irq(&vmci_dev_spinlock); spin_unlock_irq(&vmci_dev_spinlock);
err_free_data_buffer: err_free_data_buffers:
vfree(vmci_dev->data_buffer); vmci_free_dg_buffers(vmci_dev);
/* The rest are managed resources and will be freed by PCI core */ /* The rest are managed resources and will be freed by PCI core */
return error; return error;
...@@ -806,7 +856,10 @@ static void vmci_guest_remove_device(struct pci_dev *pdev) ...@@ -806,7 +856,10 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
vmci_dev->notification_base); vmci_dev->notification_base);
} }
vfree(vmci_dev->data_buffer); vmci_free_dg_buffers(vmci_dev);
if (vmci_dev->mmio_base != NULL)
pci_iounmap(pdev, vmci_dev->mmio_base);
/* The rest are managed resources and will be freed by PCI core */ /* The rest are managed resources and will be freed by PCI core */
} }
......
...@@ -21,6 +21,10 @@ ...@@ -21,6 +21,10 @@
#define VMCI_CAPS_ADDR 0x18 #define VMCI_CAPS_ADDR 0x18
#define VMCI_RESULT_LOW_ADDR 0x1c #define VMCI_RESULT_LOW_ADDR 0x1c
#define VMCI_RESULT_HIGH_ADDR 0x20 #define VMCI_RESULT_HIGH_ADDR 0x20
#define VMCI_DATA_OUT_LOW_ADDR 0x24
#define VMCI_DATA_OUT_HIGH_ADDR 0x28
#define VMCI_DATA_IN_LOW_ADDR 0x2c
#define VMCI_DATA_IN_HIGH_ADDR 0x30
#define VMCI_GUEST_PAGE_SHIFT 0x34 #define VMCI_GUEST_PAGE_SHIFT 0x34
/* Max number of devices. */ /* Max number of devices. */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment