Commit f2115faa authored by Rafael J. Wysocki

Merge branch 'acpi-pci'

* acpi-pci:
  PCI: ACPI: Add support for PCI device DMA coherency
  PCI: OF: Move of_pci_dma_configure() to pci_dma_configure()
  of/pci: Fix pci_get_host_bridge_device leak
  device property: ACPI: Remove unused DMA APIs
  device property: ACPI: Make use of the new DMA Attribute APIs
  device property: Adding DMA Attribute APIs for Generic Devices
  ACPI: Adding DMA Attribute APIs for ACPI Device
  device property: Introducing enum dev_dma_attr
  ACPI: Honor ACPI _CCA attribute setting

Conflicts:
	drivers/crypto/ccp/ccp-platform.c
parents 0d51ce9c 29dbe1f0
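
This series replaces the bool-returning acpi_check_dma()/device_dma_is_coherent() helpers with a three-state enum dev_dma_attr (DEV_DMA_NOT_SUPPORTED, DEV_DMA_NON_COHERENT, DEV_DMA_COHERENT), so callers can tell "DMA not supported" apart from "DMA supported but non-coherent". Below is a minimal sketch of the resulting driver-side pattern, modelled on the ccp and xgbe hunks in this diff; the foo_setup_dma() helper is illustrative only and not part of this merge:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/property.h>     /* enum dev_dma_attr, device_get_dma_attr() */

/* Hypothetical probe-time helper: derive DMA coherency from firmware (DT or ACPI _CCA). */
static int foo_setup_dma(struct device *dev, bool *coherent)
{
        enum dev_dma_attr attr = device_get_dma_attr(dev);

        if (attr == DEV_DMA_NOT_SUPPORTED) {
                dev_err(dev, "DMA is not supported\n");
                return -ENODEV;
        }

        /*
         * Only DEV_DMA_COHERENT means cache-coherent DMA; DEV_DMA_NON_COHERENT
         * still permits DMA but leaves cache maintenance to the architecture.
         */
        *coherent = (attr == DEV_DMA_COHERENT);
        return 0;
}
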
@@ -103,7 +103,12 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
         pdevinfo.res = resources;
         pdevinfo.num_res = count;
         pdevinfo.fwnode = acpi_fwnode_handle(adev);
-        pdevinfo.dma_mask = acpi_check_dma(adev, NULL) ? DMA_BIT_MASK(32) : 0;
+
+        if (acpi_dma_supported(adev))
+                pdevinfo.dma_mask = DMA_BIT_MASK(32);
+        else
+                pdevinfo.dma_mask = 0;
+
         pdev = platform_device_register_full(&pdevinfo);
         if (IS_ERR(pdev))
                 dev_err(&adev->dev, "platform device creation failed: %ld\n",
......
@@ -168,7 +168,7 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
         struct list_head *physnode_list;
         unsigned int node_id;
         int retval = -EINVAL;
-        bool coherent;
+        enum dev_dma_attr attr;
 
         if (has_acpi_companion(dev)) {
                 if (acpi_dev) {
@@ -225,8 +225,10 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
         if (!has_acpi_companion(dev))
                 ACPI_COMPANION_SET(dev, acpi_dev);
 
-        if (acpi_check_dma(acpi_dev, &coherent))
-                arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
+        attr = acpi_get_dma_attr(acpi_dev);
+        if (attr != DEV_DMA_NOT_SUPPORTED)
+                arch_setup_dma_ops(dev, 0, 0, NULL,
+                                   attr == DEV_DMA_COHERENT);
 
         acpi_physnode_link_name(physical_node_name, node_id);
         retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
......
@@ -1308,6 +1308,48 @@ void acpi_free_pnp_ids(struct acpi_device_pnp *pnp)
         kfree(pnp->unique_id);
 }
 
+/**
+ * acpi_dma_supported - Check DMA support for the specified device.
+ * @adev: The pointer to acpi device
+ *
+ * Return false if DMA is not supported. Otherwise, return true
+ */
+bool acpi_dma_supported(struct acpi_device *adev)
+{
+        if (!adev)
+                return false;
+
+        if (adev->flags.cca_seen)
+                return true;
+
+        /*
+         * Per ACPI 6.0 sec 6.2.17, assume devices can do cache-coherent
+         * DMA on "Intel platforms". Presumably that includes all x86 and
+         * ia64, and other arches will set CONFIG_ACPI_CCA_REQUIRED=y.
+         */
+        if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED))
+                return true;
+
+        return false;
+}
+
+/**
+ * acpi_get_dma_attr - Check the supported DMA attr for the specified device.
+ * @adev: The pointer to acpi device
+ *
+ * Return enum dev_dma_attr.
+ */
+enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
+{
+        if (!acpi_dma_supported(adev))
+                return DEV_DMA_NOT_SUPPORTED;
+
+        if (adev->flags.coherent_dma)
+                return DEV_DMA_COHERENT;
+        else
+                return DEV_DMA_NON_COHERENT;
+}
+
 static void acpi_init_coherency(struct acpi_device *adev)
 {
         unsigned long long cca = 0;
......
@@ -598,18 +598,34 @@ unsigned int device_get_child_node_count(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(device_get_child_node_count);
 
-bool device_dma_is_coherent(struct device *dev)
+bool device_dma_supported(struct device *dev)
 {
-        bool coherent = false;
-
+        /* For DT, this is always supported.
+         * For ACPI, this depends on CCA, which
+         * is determined by the acpi_dma_supported().
+         */
         if (IS_ENABLED(CONFIG_OF) && dev->of_node)
-                coherent = of_dma_is_coherent(dev->of_node);
-        else
-                acpi_check_dma(ACPI_COMPANION(dev), &coherent);
+                return true;
+
+        return acpi_dma_supported(ACPI_COMPANION(dev));
+}
+EXPORT_SYMBOL_GPL(device_dma_supported);
 
-        return coherent;
+enum dev_dma_attr device_get_dma_attr(struct device *dev)
+{
+        enum dev_dma_attr attr = DEV_DMA_NOT_SUPPORTED;
+
+        if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+                if (of_dma_is_coherent(dev->of_node))
+                        attr = DEV_DMA_COHERENT;
+                else
+                        attr = DEV_DMA_NON_COHERENT;
+        } else
+                attr = acpi_get_dma_attr(ACPI_COMPANION(dev));
+
+        return attr;
 }
-EXPORT_SYMBOL_GPL(device_dma_is_coherent);
+EXPORT_SYMBOL_GPL(device_get_dma_attr);
 
 /**
  * device_get_phy_mode - Get phy mode for given device
......
@@ -94,6 +94,7 @@ static int ccp_platform_probe(struct platform_device *pdev)
         struct ccp_device *ccp;
         struct ccp_platform *ccp_platform;
         struct device *dev = &pdev->dev;
+        enum dev_dma_attr attr;
         struct resource *ior;
         int ret;
 
@@ -118,18 +119,24 @@ static int ccp_platform_probe(struct platform_device *pdev)
         }
         ccp->io_regs = ccp->io_map;
 
-        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
-        if (ret) {
-                dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+        attr = device_get_dma_attr(dev);
+        if (attr == DEV_DMA_NOT_SUPPORTED) {
+                dev_err(dev, "DMA is not supported");
                 goto e_err;
         }
 
-        ccp_platform->coherent = device_dma_is_coherent(ccp->dev);
+        ccp_platform->coherent = (attr == DEV_DMA_COHERENT);
         if (ccp_platform->coherent)
                 ccp->axcache = CACHE_WB_NO_ALLOC;
         else
                 ccp->axcache = CACHE_NONE;
 
+        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+        if (ret) {
+                dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+                goto e_err;
+        }
+
         dev_set_drvdata(dev, ccp);
 
         ret = ccp_init(ccp);
......
@@ -342,6 +342,7 @@ static int xgbe_probe(struct platform_device *pdev)
         struct resource *res;
         const char *phy_mode;
         unsigned int i, phy_memnum, phy_irqnum;
+        enum dev_dma_attr attr;
         int ret;
 
         DBGPR("--> xgbe_probe\n");
@@ -609,7 +610,12 @@ static int xgbe_probe(struct platform_device *pdev)
                 goto err_io;
 
         /* Set the DMA coherency values */
-        pdata->coherent = device_dma_is_coherent(pdata->dev);
+        attr = device_get_dma_attr(dev);
+        if (attr == DEV_DMA_NOT_SUPPORTED) {
+                dev_err(dev, "DMA is not supported");
+                goto err_io;
+        }
+        pdata->coherent = (attr == DEV_DMA_COHERENT);
         if (pdata->coherent) {
                 pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
                 pdata->arcache = XGBE_DMA_OS_ARCACHE;
......
@@ -117,26 +117,6 @@ int of_get_pci_domain_nr(struct device_node *node)
 }
 EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
 
-/**
- * of_pci_dma_configure - Setup DMA configuration
- * @dev: ptr to pci_dev struct of the PCI device
- *
- * Function to update PCI devices's DMA configuration using the same
- * info from the OF node of host bridge's parent (if any).
- */
-void of_pci_dma_configure(struct pci_dev *pci_dev)
-{
-        struct device *dev = &pci_dev->dev;
-        struct device *bridge = pci_get_host_bridge_device(pci_dev);
-
-        if (!bridge->parent)
-                return;
-
-        of_dma_configure(dev, bridge->parent->of_node);
-        pci_put_host_bridge_device(bridge);
-}
-EXPORT_SYMBOL_GPL(of_pci_dma_configure);
-
 #if defined(CONFIG_OF_ADDRESS)
 /**
  * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT
......
@@ -6,12 +6,14 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <linux/of_device.h>
 #include <linux/of_pci.h>
 #include <linux/pci_hotplug.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/cpumask.h>
 #include <linux/pci-aspm.h>
+#include <linux/acpi.h>
 #include <asm-generic/pci-bridge.h>
 #include "pci.h"
@@ -1666,6 +1668,34 @@ static void pci_set_msi_domain(struct pci_dev *dev)
         dev_set_msi_domain(&dev->dev, d);
 }
 
+/**
+ * pci_dma_configure - Setup DMA configuration
+ * @dev: ptr to pci_dev struct of the PCI device
+ *
+ * Function to update PCI devices's DMA configuration using the same
+ * info from the OF node or ACPI node of host bridge's parent (if any).
+ */
+static void pci_dma_configure(struct pci_dev *dev)
+{
+        struct device *bridge = pci_get_host_bridge_device(dev);
+
+        if (IS_ENABLED(CONFIG_OF) && dev->dev.of_node) {
+                if (bridge->parent)
+                        of_dma_configure(&dev->dev, bridge->parent->of_node);
+        } else if (has_acpi_companion(bridge)) {
+                struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
+                enum dev_dma_attr attr = acpi_get_dma_attr(adev);
+
+                if (attr == DEV_DMA_NOT_SUPPORTED)
+                        dev_warn(&dev->dev, "DMA not supported.\n");
+                else
+                        arch_setup_dma_ops(&dev->dev, 0, 0, NULL,
+                                           attr == DEV_DMA_COHERENT);
+        }
+
+        pci_put_host_bridge_device(bridge);
+}
+
 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
 {
         int ret;
@@ -1679,7 +1709,7 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
         dev->dev.dma_mask = &dev->dma_mask;
         dev->dev.dma_parms = &dev->dma_parms;
         dev->dev.coherent_dma_mask = 0xffffffffull;
-        of_pci_dma_configure(dev);
+        pci_dma_configure(dev);
 
         pci_set_dma_max_seg_size(dev, 65536);
         pci_set_dma_seg_boundary(dev, 0xffffffff);
......
@@ -390,39 +390,6 @@ struct acpi_data_node {
         struct completion kobj_done;
 };
 
-static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent)
-{
-        bool ret = false;
-
-        if (!adev)
-                return ret;
-
-        /**
-         * Currently, we only support _CCA=1 (i.e. coherent_dma=1)
-         * This should be equivalent to specifyig dma-coherent for
-         * a device in OF.
-         *
-         * For the case when _CCA=0 (i.e. coherent_dma=0 && cca_seen=1),
-         * There are two cases:
-         * case 1. Do not support and disable DMA.
-         * case 2. Support but rely on arch-specific cache maintenance for
-         *         non-coherence DMA operations.
-         * Currently, we implement case 1 above.
-         *
-         * For the case when _CCA is missing (i.e. cca_seen=0) and
-         * platform specifies ACPI_CCA_REQUIRED, we do not support DMA,
-         * and fallback to arch-specific default handling.
-         *
-         * See acpi_init_coherency() for more info.
-         */
-        if (adev->flags.coherent_dma) {
-                ret = true;
-                if (coherent)
-                        *coherent = adev->flags.coherent_dma;
-        }
-        return ret;
-}
-
 static inline bool is_acpi_node(struct fwnode_handle *fwnode)
 {
         return fwnode && (fwnode->type == FWNODE_ACPI
@@ -595,6 +562,9 @@ struct acpi_pci_root {
 /* helper */
+bool acpi_dma_supported(struct acpi_device *adev);
+enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
+
 struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
                                            u64 address, bool check_children);
 int acpi_is_root_bridge(acpi_handle);
......
@@ -596,11 +596,16 @@ static inline int acpi_device_modalias(struct device *dev,
         return -ENODEV;
 }
 
-static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent)
+static inline bool acpi_dma_supported(struct acpi_device *adev)
 {
         return false;
 }
 
+static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
+{
+        return DEV_DMA_NOT_SUPPORTED;
+}
+
 #define ACPI_PTR(_ptr)          (NULL)
 
 #endif  /* !CONFIG_ACPI */
......
@@ -16,7 +16,6 @@ int of_pci_get_devfn(struct device_node *np);
 int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
 int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
 int of_get_pci_domain_nr(struct device_node *node);
-void of_pci_dma_configure(struct pci_dev *pci_dev);
 #else
 static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
 {
@@ -51,8 +50,6 @@ of_get_pci_domain_nr(struct device_node *node)
 {
         return -1;
 }
 
-static inline void of_pci_dma_configure(struct pci_dev *pci_dev) { }
-
 #endif
 
 #if defined(CONFIG_OF_ADDRESS)
......
@@ -27,6 +27,12 @@ enum dev_prop_type {
         DEV_PROP_MAX,
 };
 
+enum dev_dma_attr {
+        DEV_DMA_NOT_SUPPORTED,
+        DEV_DMA_NON_COHERENT,
+        DEV_DMA_COHERENT,
+};
+
 bool device_property_present(struct device *dev, const char *propname);
 int device_property_read_u8_array(struct device *dev, const char *propname,
                                   u8 *val, size_t nval);
@@ -168,7 +174,9 @@ struct property_set {
 
 void device_add_property_set(struct device *dev, struct property_set *pset);
 
-bool device_dma_is_coherent(struct device *dev);
+bool device_dma_supported(struct device *dev);
+
+enum dev_dma_attr device_get_dma_attr(struct device *dev);
 
 int device_get_phy_mode(struct device *dev);
......