Commit b40f451d authored by Chris Metcalf

tile PCI RC: make default consistent DMA mask 32-bit

This change sets the PCI devices' initial DMA capabilities
conservatively and promotes them at the request of the driver,
as opposed to assuming advanced DMA capabilities. The old design
runs the risk of breaking drivers that assume default capabilities.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
parent 6d715790
...@@ -23,7 +23,10 @@ struct dev_archdata { ...@@ -23,7 +23,10 @@ struct dev_archdata {
/* Offset of the DMA address from the PA. */ /* Offset of the DMA address from the PA. */
dma_addr_t dma_offset; dma_addr_t dma_offset;
/* Highest DMA address that can be generated by this device. */ /*
* Highest DMA address that can be generated by devices that
* have limited DMA capability, i.e. non 64-bit capable.
*/
dma_addr_t max_direct_dma_addr; dma_addr_t max_direct_dma_addr;
}; };
......
...@@ -92,14 +92,19 @@ dma_set_mask(struct device *dev, u64 mask) ...@@ -92,14 +92,19 @@ dma_set_mask(struct device *dev, u64 mask)
{ {
struct dma_map_ops *dma_ops = get_dma_ops(dev); struct dma_map_ops *dma_ops = get_dma_ops(dev);
/* Handle legacy PCI devices with limited memory addressability. */ /*
if ((dma_ops == gx_pci_dma_map_ops || * For PCI devices with 64-bit DMA addressing capability, promote
* the dma_ops to hybrid, with the consistent memory DMA space limited
* to 32-bit. For 32-bit capable devices, limit the streaming DMA
* address range to max_direct_dma_addr.
*/
if (dma_ops == gx_pci_dma_map_ops ||
dma_ops == gx_hybrid_pci_dma_map_ops || dma_ops == gx_hybrid_pci_dma_map_ops ||
dma_ops == gx_legacy_pci_dma_map_ops) && dma_ops == gx_legacy_pci_dma_map_ops) {
(mask <= DMA_BIT_MASK(32))) { if (mask == DMA_BIT_MASK(64) &&
set_dma_ops(dev, gx_legacy_pci_dma_map_ops); dma_ops == gx_legacy_pci_dma_map_ops)
set_dma_offset(dev, 0); set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
if (mask > dev->archdata.max_direct_dma_addr) else if (mask > dev->archdata.max_direct_dma_addr)
mask = dev->archdata.max_direct_dma_addr; mask = dev->archdata.max_direct_dma_addr;
} }
......
...@@ -588,15 +588,18 @@ int dma_set_coherent_mask(struct device *dev, u64 mask) ...@@ -588,15 +588,18 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
{ {
struct dma_map_ops *dma_ops = get_dma_ops(dev); struct dma_map_ops *dma_ops = get_dma_ops(dev);
/* Handle hybrid PCI devices with limited memory addressability. */ /*
if ((dma_ops == gx_pci_dma_map_ops || * For PCI devices with 64-bit DMA addressing capability, promote
* the dma_ops to full capability for both streams and consistent
* memory access. For 32-bit capable devices, limit the consistent
* memory DMA range to max_direct_dma_addr.
*/
if (dma_ops == gx_pci_dma_map_ops ||
dma_ops == gx_hybrid_pci_dma_map_ops || dma_ops == gx_hybrid_pci_dma_map_ops ||
dma_ops == gx_legacy_pci_dma_map_ops) && dma_ops == gx_legacy_pci_dma_map_ops) {
(mask <= DMA_BIT_MASK(32))) { if (mask == DMA_BIT_MASK(64))
if (dma_ops == gx_pci_dma_map_ops) set_dma_ops(dev, gx_pci_dma_map_ops);
set_dma_ops(dev, gx_hybrid_pci_dma_map_ops); else if (mask > dev->archdata.max_direct_dma_addr)
if (mask > dev->archdata.max_direct_dma_addr)
mask = dev->archdata.max_direct_dma_addr; mask = dev->archdata.max_direct_dma_addr;
} }
......
...@@ -1081,13 +1081,24 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) ...@@ -1081,13 +1081,24 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
return pci_enable_resources(dev, mask); return pci_enable_resources(dev, mask);
} }
/* Called for each device after PCI setup is done. */ /*
* Called for each device after PCI setup is done.
* We initialize the PCI device capabilities conservatively, assuming that
* all devices can only address the 32-bit DMA space. The exception here is
* that the device dma_offset is set to the value that matches the 64-bit
* capable devices. This is OK because dma_offset is not used by legacy
* dma_ops, nor by the hybrid dma_ops's streaming DMAs, which are 64-bit ops.
* This implementation matches the kernel design of setting PCI devices'
* coherent_dma_mask to 0xffffffffull by default, allowing the device drivers
* to skip calling pci_set_consistent_dma_mask(DMA_BIT_MASK(32)).
*/
static void pcibios_fixup_final(struct pci_dev *pdev) static void pcibios_fixup_final(struct pci_dev *pdev)
{ {
set_dma_ops(&pdev->dev, gx_pci_dma_map_ops); set_dma_ops(&pdev->dev, gx_legacy_pci_dma_map_ops);
set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET); set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
pdev->dev.archdata.max_direct_dma_addr = pdev->dev.archdata.max_direct_dma_addr =
TILE_PCI_MAX_DIRECT_DMA_ADDRESS; TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
pdev->dev.coherent_dma_mask = TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
} }
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final); DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment