Commit ec18d379 authored by Linus Torvalds

Merge bk://gkernel.bkbits.net/net-drivers-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents def7102a 7e093c30
......@@ -13,14 +13,23 @@
#include <asm/scatterlist.h>
#include <asm/bug.h>
int dma_supported(struct device *dev, u64 mask)
static struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
if (dev->bus == &pci_bus_type)
return pci_dma_supported(to_pci_dev(dev), mask);
return &pci_dma_ops;
#ifdef CONFIG_IBMVIO
if (dev->bus == &vio_bus_type)
return vio_dma_supported(to_vio_dev(dev), mask);
#endif /* CONFIG_IBMVIO */
return &vio_dma_ops;
#endif
return NULL;
}
int dma_supported(struct device *dev, u64 mask)
{
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
if (dma_ops)
return dma_ops->dma_supported(dev, mask);
BUG();
return 0;
}
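The helper above is the core of the patch: one table lookup replaces the per-call bus checks that every wrapper used to duplicate. A self-contained sketch of the same dispatch scheme, using stand-in bus tags instead of the kernel's bus_type pointers (names here are illustrative, not from the patch):
#include <stdio.h>
struct device { int bus; };			/* stand-in for struct device */
enum { BUS_PCI, BUS_VIO };
struct dma_mapping_ops {
	int (*dma_supported)(struct device *dev, unsigned long long mask);
};
static int pci_supported(struct device *dev, unsigned long long mask)
{
	return mask >= 0xffffffffull;		/* example policy only */
}
static int vio_supported(struct device *dev, unsigned long long mask)
{
	return 1;				/* TCE table can map anything */
}
static struct dma_mapping_ops pci_ops = { .dma_supported = pci_supported };
static struct dma_mapping_ops vio_ops = { .dma_supported = vio_supported };
static struct dma_mapping_ops *get_ops(struct device *dev)
{
	if (dev->bus == BUS_PCI)
		return &pci_ops;
	if (dev->bus == BUS_VIO)
		return &vio_ops;
	return NULL;				/* caller BUG()s, as above */
}
int main(void)
{
	struct device d = { .bus = BUS_VIO };
	struct dma_mapping_ops *ops = get_ops(&d);
	printf("supported: %d\n", ops ? ops->dma_supported(&d, ~0ull) : -1);
	return 0;
}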
......@@ -32,7 +41,7 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#ifdef CONFIG_IBMVIO
if (dev->bus == &vio_bus_type)
return vio_set_dma_mask(to_vio_dev(dev), dma_mask);
return -EIO;
#endif /* CONFIG_IBMVIO */
BUG();
return 0;
......@@ -42,12 +51,10 @@ EXPORT_SYMBOL(dma_set_mask);
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int flag)
{
if (dev->bus == &pci_bus_type)
return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
#ifdef CONFIG_IBMVIO
if (dev->bus == &vio_bus_type)
return vio_alloc_consistent(to_vio_dev(dev), size, dma_handle);
#endif /* CONFIG_IBMVIO */
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
if (dma_ops)
return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
BUG();
return NULL;
}
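With the flag finally plumbed through, a caller can choose its allocation context, and the same call works whether the device sits on the PCI or the VIO bus. A hypothetical driver fragment (dev is any device whose bus has an ops table registered):
dma_addr_t handle;
void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
if (buf) {
	/* ... program the device with 'handle' ... */
	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
}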
......@@ -56,12 +63,10 @@ EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
{
if (dev->bus == &pci_bus_type)
pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
#ifdef CONFIG_IBMVIO
else if (dev->bus == &vio_bus_type)
vio_free_consistent(to_vio_dev(dev), size, cpu_addr, dma_handle);
#endif /* CONFIG_IBMVIO */
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
if (dma_ops)
dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
else
BUG();
}
......@@ -70,12 +75,10 @@ EXPORT_SYMBOL(dma_free_coherent);
dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
if (dev->bus == &pci_bus_type)
return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
#ifdef CONFIG_IBMVIO
if (dev->bus == &vio_bus_type)
return vio_map_single(to_vio_dev(dev), cpu_addr, size, direction);
#endif /* CONFIG_IBMVIO */
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
if (dma_ops)
return dma_ops->map_single(dev, cpu_addr, size, direction);
BUG();
return (dma_addr_t)0;
}
......@@ -84,12 +87,10 @@ EXPORT_SYMBOL(dma_map_single);
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
if (dev->bus == &pci_bus_type)
pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
#ifdef CONFIG_IBMVIO
else if (dev->bus == &vio_bus_type)
vio_unmap_single(to_vio_dev(dev), dma_addr, size, direction);
#endif /* CONFIG_IBMVIO */
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
if (dma_ops)
dma_ops->unmap_single(dev, dma_addr, size, direction);
else
BUG();
}
......@@ -99,12 +100,11 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
if (dev->bus == &pci_bus_type)
return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
#ifdef CONFIG_IBMVIO
if (dev->bus == &vio_bus_type)
return vio_map_page(to_vio_dev(dev), page, offset, size, direction);
#endif /* CONFIG_IBMVIO */
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
if (dma_ops)
return dma_ops->map_single(dev,
(page_address(page) + offset), size, direction);
BUG();
return (dma_addr_t)0;
}
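Note there is no map_page hook in the new ops table; map_page is expressed through map_single instead. That relies on page_address() being valid for every page, which holds here because ppc64 has no highmem, so every page has a permanent kernel mapping. A restatement of the body above as a sketch:
/* page_address() never returns NULL without highmem */
void *vaddr = page_address(page) + offset;
return dma_ops->map_single(dev, vaddr, size, direction);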
......@@ -113,12 +113,10 @@ EXPORT_SYMBOL(dma_map_page);
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
if (dev->bus == &pci_bus_type)
pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
#ifdef CONFIG_IBMVIO
else if (dev->bus == &vio_bus_type)
vio_unmap_page(to_vio_dev(dev), dma_address, size, direction);
#endif /* CONFIG_IBMVIO */
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
if (dma_ops)
dma_ops->unmap_single(dev, dma_address, size, direction);
else
BUG();
}
......@@ -127,12 +125,10 @@ EXPORT_SYMBOL(dma_unmap_page);
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
if (dev->bus == &pci_bus_type)
return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
#ifdef CONFIG_IBMVIO
if (dev->bus == &vio_bus_type)
return vio_map_sg(to_vio_dev(dev), sg, nents, direction);
#endif /* CONFIG_IBMVIO */
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
if (dma_ops)
return dma_ops->map_sg(dev, sg, nents, direction);
BUG();
return 0;
}
......@@ -141,12 +137,10 @@ EXPORT_SYMBOL(dma_map_sg);
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
if (dev->bus == &pci_bus_type)
pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
#ifdef CONFIG_IBMVIO
else if (dev->bus == &vio_bus_type)
vio_unmap_sg(to_vio_dev(dev), sg, nhwentries, direction);
#endif /* CONFIG_IBMVIO */
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
if (dma_ops)
dma_ops->unmap_sg(dev, sg, nhwentries, direction);
else
BUG();
}
......
......@@ -513,8 +513,8 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
* Returns the virtual address of the buffer and sets dma_handle
* to the dma address (mapping) of the first page.
*/
void *iommu_alloc_consistent(struct iommu_table *tbl, size_t size,
dma_addr_t *dma_handle)
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
dma_addr_t *dma_handle, int flag)
{
void *ret = NULL;
dma_addr_t mapping;
......@@ -538,7 +538,7 @@ void *iommu_alloc_consistent(struct iommu_table *tbl, size_t size,
return NULL;
/* Alloc enough pages (and possibly more) */
ret = (void *)__get_free_pages(GFP_ATOMIC, order);
ret = (void *)__get_free_pages(flag, order);
if (!ret)
return NULL;
memset(ret, 0, size);
......@@ -553,7 +553,7 @@ void *iommu_alloc_consistent(struct iommu_table *tbl, size_t size,
return ret;
}
void iommu_free_consistent(struct iommu_table *tbl, size_t size,
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
unsigned int npages;
......
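The allocation flag now comes from the caller instead of being hard-coded to GFP_ATOMIC, so sleeping callers no longer drain the atomic pools. Hypothetical callers, assuming tbl points at a valid iommu_table:
dma_addr_t handle;
void *p;
p = iommu_alloc_coherent(tbl, size, &handle, GFP_ATOMIC);	/* interrupt context */
p = iommu_alloc_coherent(tbl, size, &handle, GFP_KERNEL);	/* process context, may sleep */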
......@@ -322,8 +322,8 @@ static void __init pSeries_discover_pic(void)
static void pSeries_mach_cpu_die(void)
{
idle_task_exit();
local_irq_disable();
idle_task_exit();
/* Some hardware requires clearing the CPPR, while other hardware does
* not; it is safe either way.
*/
......
......@@ -151,7 +151,7 @@ static unsigned int find_physical_cpu_to_start(unsigned int old_hwindex)
if (index) {
int state;
int rc = rtas_get_sensor(9003, *index, &state);
if (rc != 0 || state != 1)
if (rc < 0 || state != 1)
continue;
}
......
......@@ -71,7 +71,7 @@ void iSeries_pcibios_init(void);
LIST_HEAD(hose_list);
struct pci_dma_ops pci_dma_ops;
struct dma_mapping_ops pci_dma_ops;
EXPORT_SYMBOL(pci_dma_ops);
int global_phb_number; /* Global phb counter */
......
......@@ -30,12 +30,12 @@
#include "pci.h"
static void *pci_direct_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, int flag)
{
void *ret;
ret = (void *)__get_free_pages(GFP_ATOMIC, get_order(size));
ret = (void *)__get_free_pages(flag, get_order(size));
if (ret != NULL) {
memset(ret, 0, size);
*dma_handle = virt_to_abs(ret);
......@@ -43,24 +43,24 @@ static void *pci_direct_alloc_consistent(struct pci_dev *hwdev, size_t size,
return ret;
}
static void pci_direct_free_consistent(struct pci_dev *hwdev, size_t size,
static void pci_direct_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
free_pages((unsigned long)vaddr, get_order(size));
}
static dma_addr_t pci_direct_map_single(struct pci_dev *hwdev, void *ptr,
static dma_addr_t pci_direct_map_single(struct device *hwdev, void *ptr,
size_t size, enum dma_data_direction direction)
{
return virt_to_abs(ptr);
}
static void pci_direct_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
static void pci_direct_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction)
{
}
static int pci_direct_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
static int pci_direct_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
{
int i;
......@@ -73,17 +73,23 @@ static int pci_direct_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
return nents;
}
static void pci_direct_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
static void pci_direct_unmap_sg(struct device *hwdev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
{
}
static int pci_direct_dma_supported(struct device *dev, u64 mask)
{
return mask < 0x100000000ull;
}
void __init pci_direct_iommu_init(void)
{
pci_dma_ops.pci_alloc_consistent = pci_direct_alloc_consistent;
pci_dma_ops.pci_free_consistent = pci_direct_free_consistent;
pci_dma_ops.pci_map_single = pci_direct_map_single;
pci_dma_ops.pci_unmap_single = pci_direct_unmap_single;
pci_dma_ops.pci_map_sg = pci_direct_map_sg;
pci_dma_ops.pci_unmap_sg = pci_direct_unmap_sg;
pci_dma_ops.alloc_coherent = pci_direct_alloc_coherent;
pci_dma_ops.free_coherent = pci_direct_free_coherent;
pci_dma_ops.map_single = pci_direct_map_single;
pci_dma_ops.unmap_single = pci_direct_unmap_single;
pci_dma_ops.map_sg = pci_direct_map_sg;
pci_dma_ops.unmap_sg = pci_direct_unmap_sg;
pci_dma_ops.dma_supported = pci_direct_dma_supported;
}
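Whichever init routine runs at boot selects the backend for the whole PCI tree; afterwards every generic DMA call dispatches through pci_dma_ops. A hypothetical boot-time selection, not taken from this patch:
/* hypothetical platform setup code */
void __init setup_pci_dma(int iommu_off)
{
	if (iommu_off)
		pci_direct_iommu_init();	/* bus address == physical address */
	else
		pci_iommu_init();		/* translate through TCE tables */
}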
......@@ -50,19 +50,23 @@
*/
#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
static inline struct iommu_table *devnode_table(struct pci_dev *dev)
static inline struct iommu_table *devnode_table(struct device *dev)
{
if (!dev)
dev = ppc64_isabridge_dev;
if (!dev)
struct pci_dev *pdev;
if (!dev) {
pdev = ppc64_isabridge_dev;
if (!pdev)
return NULL;
} else
pdev = to_pci_dev(dev);
#ifdef CONFIG_PPC_ISERIES
return ISERIES_DEVNODE(dev)->iommu_table;
return ISERIES_DEVNODE(pdev)->iommu_table;
#endif /* CONFIG_PPC_ISERIES */
#ifdef CONFIG_PPC_MULTIPLATFORM
return PCI_GET_DN(dev)->iommu_table;
return PCI_GET_DN(pdev)->iommu_table;
#endif /* CONFIG_PPC_MULTIPLATFORM */
}
......@@ -71,16 +75,17 @@ static inline struct iommu_table *devnode_table(struct pci_dev *dev)
* Returns the virtual address of the buffer and sets dma_handle
* to the dma address (mapping) of the first page.
*/
static void *pci_iommu_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, int flag)
{
return iommu_alloc_consistent(devnode_table(hwdev), size, dma_handle);
return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
flag);
}
static void pci_iommu_free_consistent(struct pci_dev *hwdev, size_t size,
static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
iommu_free_consistent(devnode_table(hwdev), size, vaddr, dma_handle);
iommu_free_coherent(devnode_table(hwdev), size, vaddr, dma_handle);
}
/* Creates TCEs for a user provided buffer. The user buffer must be
......@@ -89,46 +94,46 @@ static void pci_iommu_free_consistent(struct pci_dev *hwdev, size_t size,
* need not be page aligned, the dma_addr_t returned will point to the same
* byte within the page as vaddr.
*/
static dma_addr_t pci_iommu_map_single(struct pci_dev *hwdev, void *vaddr,
static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
size_t size, enum dma_data_direction direction)
{
return iommu_map_single(devnode_table(hwdev), vaddr, size, direction);
}
static void pci_iommu_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_handle,
static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
iommu_unmap_single(devnode_table(hwdev), dma_handle, size, direction);
}
static int pci_iommu_map_sg(struct pci_dev *pdev, struct scatterlist *sglist,
static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
return iommu_map_sg(&pdev->dev, devnode_table(pdev), sglist,
return iommu_map_sg(pdev, devnode_table(pdev), sglist,
nelems, direction);
}
static void pci_iommu_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist,
static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
iommu_unmap_sg(devnode_table(pdev), sglist, nelems, direction);
}
/* We support DMA to/from any memory page via the iommu */
static int pci_iommu_dma_supported(struct pci_dev *pdev, u64 mask)
static int pci_iommu_dma_supported(struct device *dev, u64 mask)
{
return 1;
}
void pci_iommu_init(void)
{
pci_dma_ops.pci_alloc_consistent = pci_iommu_alloc_consistent;
pci_dma_ops.pci_free_consistent = pci_iommu_free_consistent;
pci_dma_ops.pci_map_single = pci_iommu_map_single;
pci_dma_ops.pci_unmap_single = pci_iommu_unmap_single;
pci_dma_ops.pci_map_sg = pci_iommu_map_sg;
pci_dma_ops.pci_unmap_sg = pci_iommu_unmap_sg;
pci_dma_ops.pci_dma_supported = pci_iommu_dma_supported;
pci_dma_ops.alloc_coherent = pci_iommu_alloc_coherent;
pci_dma_ops.free_coherent = pci_iommu_free_coherent;
pci_dma_ops.map_single = pci_iommu_map_single;
pci_dma_ops.unmap_single = pci_iommu_unmap_single;
pci_dma_ops.map_sg = pci_iommu_map_sg;
pci_dma_ops.unmap_sg = pci_iommu_unmap_sg;
pci_dma_ops.dma_supported = pci_iommu_dma_supported;
}
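After this wiring, a driver-level call resolves through the table rather than through the pci_* wrappers. A hedged sketch of the resulting path for a pSeries PCI device (pdev, buf, and len are hypothetical):
/* dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE)
 *   -> get_dma_ops()		returns &pci_dma_ops (PCI bus)
 *   -> pci_dma_ops.map_single	== pci_iommu_map_single
 *   -> iommu_map_single(devnode_table(&pdev->dev), buf, len, dir)
 */
dma_addr_t h = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
/* ... start the device ... */
dma_unmap_single(&pdev->dev, h, len, DMA_TO_DEVICE);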
......@@ -255,29 +255,59 @@ rtas_extended_busy_delay_time(int status)
return ms;
}
int
rtas_get_power_level(int powerdomain, int *level)
int rtas_error_rc(int rtas_rc)
{
int rc;
switch (rtas_rc) {
case -1: /* Hardware Error */
rc = -EIO;
break;
case -3: /* Bad indicator/domain/etc */
rc = -EINVAL;
break;
case -9000: /* Isolation error */
rc = -EFAULT;
break;
case -9001: /* Outstanding TCE/PTE */
rc = -EEXIST;
break;
case -9002: /* No usable slot */
rc = -ENODEV;
break;
default:
printk(KERN_ERR "%s: unexpected RTAS error %d\n",
__FUNCTION__, rtas_rc);
rc = -ERANGE;
break;
}
return rc;
}
int rtas_get_power_level(int powerdomain, int *level)
{
int token = rtas_token("get-power-level");
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return RTAS_UNKNOWN_OP;
return -ENOENT;
while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY)
udelay(1);
if (rc < 0)
return rtas_error_rc(rc);
return rc;
}
int
rtas_set_power_level(int powerdomain, int level, int *setlevel)
int rtas_set_power_level(int powerdomain, int level, int *setlevel)
{
int token = rtas_token("set-power-level");
unsigned int wait_time;
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return RTAS_UNKNOWN_OP;
return -ENOENT;
while (1) {
rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
......@@ -289,18 +319,20 @@ rtas_set_power_level(int powerdomain, int level, int *setlevel)
} else
break;
}
if (rc < 0)
return rtas_error_rc(rc);
return rc;
}
int
rtas_get_sensor(int sensor, int index, int *state)
int rtas_get_sensor(int sensor, int index, int *state)
{
int token = rtas_token("get-sensor-state");
unsigned int wait_time;
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return RTAS_UNKNOWN_OP;
return -ENOENT;
while (1) {
rc = rtas_call(token, 2, 2, state, sensor, index);
......@@ -312,18 +344,20 @@ rtas_get_sensor(int sensor, int index, int *state)
} else
break;
}
if (rc < 0)
return rtas_error_rc(rc);
return rc;
}
int
rtas_set_indicator(int indicator, int index, int new_value)
int rtas_set_indicator(int indicator, int index, int new_value)
{
int token = rtas_token("set-indicator");
unsigned int wait_time;
int rc;
if (token == RTAS_UNKNOWN_SERVICE)
return RTAS_UNKNOWN_OP;
return -ENOENT;
while (1) {
rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
......@@ -337,6 +371,8 @@ rtas_set_indicator(int indicator, int index, int new_value)
break;
}
if (rc < 0)
return rtas_error_rc(rc);
return rc;
}
......
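With rtas_error_rc() in place, the RTAS wrappers return negative Linux errnos on failure and non-negative RTAS values on success, which is why callers throughout this patch switch from rc != 0 to rc < 0. A hypothetical caller (slot_index is illustrative):
int state;
int rc = rtas_get_sensor(DR_ENTITY_SENSE, slot_index, &state);
if (rc == -ENODEV) {
	/* RTAS -9002: no usable slot */
} else if (rc < 0) {
	/* other hardware or parameter error */
} else {
	/* success: 'state' holds the sensor value */
}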
......@@ -347,7 +347,7 @@ static int enable_surveillance(int timeout)
if (error == 0)
return 0;
if (error == RTAS_NO_SUCH_INDICATOR) {
if (error == -EINVAL) {
printk(KERN_INFO "rtasd: surveillance not supported\n");
return 0;
}
......
......@@ -557,48 +557,61 @@ int vio_disable_interrupts(struct vio_dev *dev)
EXPORT_SYMBOL(vio_disable_interrupts);
#endif
dma_addr_t vio_map_single(struct vio_dev *dev, void *vaddr,
static dma_addr_t vio_map_single(struct device *dev, void *vaddr,
size_t size, enum dma_data_direction direction)
{
return iommu_map_single(dev->iommu_table, vaddr, size, direction);
return iommu_map_single(to_vio_dev(dev)->iommu_table, vaddr, size,
direction);
}
EXPORT_SYMBOL(vio_map_single);
void vio_unmap_single(struct vio_dev *dev, dma_addr_t dma_handle,
static void vio_unmap_single(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
iommu_unmap_single(dev->iommu_table, dma_handle, size, direction);
iommu_unmap_single(to_vio_dev(dev)->iommu_table, dma_handle, size,
direction);
}
EXPORT_SYMBOL(vio_unmap_single);
int vio_map_sg(struct vio_dev *vdev, struct scatterlist *sglist, int nelems,
enum dma_data_direction direction)
static int vio_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
return iommu_map_sg(&vdev->dev, vdev->iommu_table, sglist,
return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist,
nelems, direction);
}
EXPORT_SYMBOL(vio_map_sg);
void vio_unmap_sg(struct vio_dev *vdev, struct scatterlist *sglist, int nelems,
enum dma_data_direction direction)
static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
iommu_unmap_sg(vdev->iommu_table, sglist, nelems, direction);
iommu_unmap_sg(to_vio_dev(dev)->iommu_table, sglist, nelems, direction);
}
EXPORT_SYMBOL(vio_unmap_sg);
void *vio_alloc_consistent(struct vio_dev *dev, size_t size,
dma_addr_t *dma_handle)
static void *vio_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int flag)
{
return iommu_alloc_consistent(dev->iommu_table, size, dma_handle);
return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
dma_handle, flag);
}
EXPORT_SYMBOL(vio_alloc_consistent);
void vio_free_consistent(struct vio_dev *dev, size_t size,
static void vio_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
iommu_free_consistent(dev->iommu_table, size, vaddr, dma_handle);
iommu_free_coherent(to_vio_dev(dev)->iommu_table, size, vaddr,
dma_handle);
}
EXPORT_SYMBOL(vio_free_consistent);
static int vio_dma_supported(struct device *dev, u64 mask)
{
return 1;
}
struct dma_mapping_ops vio_dma_ops = {
.alloc_coherent = vio_alloc_coherent,
.free_coherent = vio_free_coherent,
.map_single = vio_map_single,
.unmap_single = vio_unmap_single,
.map_sg = vio_map_sg,
.unmap_sg = vio_unmap_sg,
.dma_supported = vio_dma_supported,
};
static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
......
......@@ -654,7 +654,7 @@ void xics_migrate_irqs_away(void)
/* remove ourselves from the global interrupt queue */
status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
(1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
WARN_ON(status != 0);
WARN_ON(status < 0);
/* Allow IPIs again... */
ops->cppr_info(cpu, DEFAULT_PRIORITY);
......@@ -704,15 +704,8 @@ void xics_migrate_irqs_away(void)
virq, cpu);
/* Reset affinity to all cpus */
xics_status[0] = default_distrib_server;
status = rtas_call(ibm_set_xive, 3, 1, NULL, irq,
xics_status[0], xics_status[1]);
if (status)
printk(KERN_ERR "migrate_irqs_away: irq=%d "
"ibm,set-xive returns %d\n",
virq, status);
desc->handler->set_affinity(virq, CPU_MASK_ALL);
irq_affinity[virq] = CPU_MASK_ALL;
unlock:
spin_unlock_irqrestore(&desc->lock, flags);
}
......
......@@ -142,7 +142,7 @@ static int pci_add_secondary_bus(struct device_node *dn,
child = pci_add_new_bus(bridge_dev->bus, bridge_dev, sec_busno);
if (!child) {
printk(KERN_ERR "%s: could not add secondary bus\n", __FUNCTION__);
return 1;
return -ENOMEM;
}
sprintf(child->name, "PCI Bus #%02x", child->number);
......@@ -204,7 +204,7 @@ static int dlpar_pci_remove_bus(struct pci_dev *bridge_dev)
if (!bridge_dev) {
printk(KERN_ERR "%s: unexpected null device\n",
__FUNCTION__);
return 1;
return -EINVAL;
}
secondary_bus = bridge_dev->subordinate;
......@@ -212,7 +212,7 @@ static int dlpar_pci_remove_bus(struct pci_dev *bridge_dev)
if (unmap_bus_range(secondary_bus)) {
printk(KERN_ERR "%s: failed to unmap bus range\n",
__FUNCTION__);
return 1;
return -ERANGE;
}
pci_remove_bus_device(bridge_dev);
......@@ -282,7 +282,7 @@ static int dlpar_remove_phb(struct slot *slot)
}
rc = dlpar_remove_root_bus(phb);
if (rc)
if (rc < 0)
return rc;
return 0;
......@@ -294,7 +294,7 @@ static int dlpar_add_phb(struct device_node *dn)
phb = init_phb_dynamic(dn);
if (!phb)
return 1;
return -EINVAL;
return 0;
}
......
......@@ -45,11 +45,6 @@
#define LED_ID 2 /* slow blinking */
#define LED_ACTION 3 /* fast blinking */
/* Error status from rtas_get-sensor */
#define NEED_POWER -9000 /* slot must be powered up and unisolated to get state */
#define PWR_ONLY -9001 /* slot must be powered up to get state, leave isolated */
#define ERR_SENSE_USE -9002 /* No DR operation will succeed, slot is unusable */
/* Sensor values from rtas_get-sensor */
#define EMPTY 0 /* No card in slot */
#define PRESENT 1 /* Card in slot */
......
......@@ -256,12 +256,12 @@ int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
my_index = (int *) get_property(dn, "ibm,my-drc-index", NULL);
if (!my_index) {
/* Node isn't DLPAR/hotplug capable */
return 1;
return -EINVAL;
}
rc = get_children_props(dn->parent, &indexes, &names, &types, &domains);
if (rc < 0) {
return 1;
return -EINVAL;
}
name_tmp = (char *) &names[1];
......@@ -284,7 +284,7 @@ int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
type_tmp += (strlen(type_tmp) + 1);
}
return 1;
return -EINVAL;
}
static int is_php_type(char *drc_type)
......
......@@ -81,8 +81,8 @@ static int rpaphp_get_sensor_state(struct slot *slot, int *state)
rc = rtas_get_sensor(DR_ENTITY_SENSE, slot->index, state);
if (rc) {
if (rc == NEED_POWER || rc == PWR_ONLY) {
if (rc < 0) {
if (rc == -EFAULT || rc == -EEXIST) {
dbg("%s: slot must be power up to get sensor-state\n",
__FUNCTION__);
......@@ -91,14 +91,14 @@ static int rpaphp_get_sensor_state(struct slot *slot, int *state)
*/
rc = rtas_set_power_level(slot->power_domain, POWER_ON,
&setlevel);
if (rc) {
if (rc < 0) {
dbg("%s: power on slot[%s] failed rc=%d.\n",
__FUNCTION__, slot->name, rc);
} else {
rc = rtas_get_sensor(DR_ENTITY_SENSE,
slot->index, state);
}
} else if (rc == ERR_SENSE_USE)
} else if (rc == -ENODEV)
info("%s: slot is unusable\n", __FUNCTION__);
else
err("%s failed to get sensor state\n", __FUNCTION__);
......@@ -413,7 +413,7 @@ static int setup_pci_hotplug_slot_info(struct slot *slot)
if (slot->hotplug_slot->info->adapter_status == NOT_VALID) {
err("%s: NOT_VALID: skip dn->full_name=%s\n",
__FUNCTION__, slot->dn->full_name);
return -1;
return -EINVAL;
}
return 0;
}
......@@ -426,15 +426,15 @@ static int set_phb_slot_name(struct slot *slot)
dn = slot->dn;
if (!dn) {
return 1;
return -EINVAL;
}
phb = dn->phb;
if (!phb) {
return 1;
return -EINVAL;
}
bus = phb->bus;
if (!bus) {
return 1;
return -EINVAL;
}
sprintf(slot->name, "%04x:%02x:%02x.%x", pci_domain_nr(bus),
......@@ -448,7 +448,7 @@ static int setup_pci_slot(struct slot *slot)
if (slot->type == PHB) {
rc = set_phb_slot_name(slot);
if (rc) {
if (rc < 0) {
err("%s: failed to set phb slot name\n", __FUNCTION__);
goto exit_rc;
}
......@@ -509,12 +509,12 @@ static int setup_pci_slot(struct slot *slot)
return 0;
exit_rc:
dealloc_slot_struct(slot);
return 1;
return -EINVAL;
}
int register_pci_slot(struct slot *slot)
{
int rc = 1;
int rc = -EINVAL;
slot->dev_type = PCI_DEV;
if ((slot->type == EMBEDDED) || (slot->type == PHB))
......
......@@ -211,7 +211,7 @@ int register_slot(struct slot *slot)
if (is_registered(slot)) { /* shouldn't be here */
err("register_slot: slot[%s] is already registered\n", slot->name);
rpaphp_release_slot(slot->hotplug_slot);
return 1;
return -EAGAIN;
}
retval = pci_hp_register(slot->hotplug_slot);
if (retval) {
......@@ -270,7 +270,7 @@ int rpaphp_set_attention_status(struct slot *slot, u8 status)
/* status: LED_OFF or LED_ON */
rc = rtas_set_indicator(DR_INDICATOR, slot->index, status);
if (rc)
if (rc < 0)
err("slot(name=%s location=%s index=0x%x) set attention-status(%d) failed! rc=0x%x\n",
slot->name, slot->location, slot->index, status, rc);
......
......@@ -71,11 +71,11 @@ int register_vio_slot(struct device_node *dn)
{
u32 *index;
char *name;
int rc = 1;
int rc = -EINVAL;
struct slot *slot = NULL;
rc = rpaphp_get_drc_props(dn, NULL, &name, NULL, NULL);
if (rc)
if (rc < 0)
goto exit_rc;
index = (u32 *) get_property(dn, "ibm,my-drc-index", NULL);
if (!index)
......
......@@ -113,4 +113,24 @@ dma_cache_sync(void *vaddr, size_t size,
/* nothing to do */
}
/*
* DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
*/
struct dma_mapping_ops {
void * (*alloc_coherent)(struct device *dev, size_t size,
dma_addr_t *dma_handle, int flag);
void (*free_coherent)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
dma_addr_t (*map_single)(struct device *dev, void *ptr,
size_t size, enum dma_data_direction direction);
void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction);
int (*map_sg)(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction);
void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction);
int (*dma_supported)(struct device *dev, u64 mask);
int (*dac_dma_supported)(struct device *dev, u64 mask);
};
#endif /* _ASM_DMA_MAPPING_H */
......@@ -104,17 +104,30 @@ int eeh_unregister_notifier(struct notifier_block *nb);
*/
#define EEH_IO_ERROR_VALUE(size) (~0U >> ((4 - (size)) * 8))
#else
#define eeh_init()
#define eeh_check_failure(token, val) (val)
#define eeh_dn_check_failure(dn, dev) (0)
#define pci_addr_cache_build()
#define eeh_add_device_early(dn)
#define eeh_add_device_late(dev)
#define eeh_remove_device(dev)
#else /* !CONFIG_EEH */
static inline void eeh_init(void) { }
static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
{
return val;
}
static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
{
return 0;
}
static inline void pci_addr_cache_build(void) { }
static inline void eeh_add_device_early(struct device_node *dn) { }
static inline void eeh_add_device_late(struct pci_dev *dev) { }
static inline void eeh_remove_device(struct pci_dev *dev) { }
#define EEH_POSSIBLE_ERROR(val, type) (0)
#define EEH_IO_ERROR_VALUE(size) (-1UL)
#endif
#endif /* CONFIG_EEH */
/*
* MMIO read/write operations with EEH support.
......
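Turning the empty CONFIG_EEH=n macros into static inlines means the arguments keep being type-checked even in the stub configuration, whereas a macro discards its arguments unchecked. A sketch of the kind of mistake this catches:
struct pci_dev *pdev;
eeh_add_device_early(pdev);	/* macro form: silently discarded when EEH is off;
				 * inline stub: compile-time type error on every
				 * config, since it wants a struct device_node * */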
......@@ -145,9 +145,9 @@ extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction);
extern void *iommu_alloc_consistent(struct iommu_table *tbl, size_t size,
dma_addr_t *dma_handle);
extern void iommu_free_consistent(struct iommu_table *tbl, size_t size,
extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
dma_addr_t *dma_handle, int flag);
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
size_t size, enum dma_data_direction direction);
......
......@@ -13,11 +13,14 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <asm/machdep.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm-generic/pci-dma-compat.h>
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
......@@ -63,131 +66,18 @@ static inline int pcibios_prep_mwi(struct pci_dev *dev)
extern unsigned int pcibios_assign_all_busses(void);
/*
* PCI DMA operations are abstracted for G5 vs. i/pSeries
*/
struct pci_dma_ops {
void * (*pci_alloc_consistent)(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle);
void (*pci_free_consistent)(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle);
dma_addr_t (*pci_map_single)(struct pci_dev *hwdev, void *ptr,
size_t size, enum dma_data_direction direction);
void (*pci_unmap_single)(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction);
int (*pci_map_sg)(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, enum dma_data_direction direction);
void (*pci_unmap_sg)(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, enum dma_data_direction direction);
int (*pci_dma_supported)(struct pci_dev *hwdev, u64 mask);
int (*pci_dac_dma_supported)(struct pci_dev *hwdev, u64 mask);
};
extern struct pci_dma_ops pci_dma_ops;
static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
return pci_dma_ops.pci_alloc_consistent(hwdev, size, dma_handle);
}
static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
pci_dma_ops.pci_free_consistent(hwdev, size, vaddr, dma_handle);
}
static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
size_t size, int direction)
{
return pci_dma_ops.pci_map_single(hwdev, ptr, size,
(enum dma_data_direction)direction);
}
static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, int direction)
{
pci_dma_ops.pci_unmap_single(hwdev, dma_addr, size,
(enum dma_data_direction)direction);
}
static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
return pci_dma_ops.pci_map_sg(hwdev, sg, nents,
(enum dma_data_direction)direction);
}
static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
pci_dma_ops.pci_unmap_sg(hwdev, sg, nents,
(enum dma_data_direction)direction);
}
static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev,
dma_addr_t dma_handle,
size_t size, int direction)
{
BUG_ON(direction == PCI_DMA_NONE);
/* nothing to do */
}
static inline void pci_dma_sync_single_for_device(struct pci_dev *hwdev,
dma_addr_t dma_handle,
size_t size, int direction)
{
BUG_ON(direction == PCI_DMA_NONE);
/* nothing to do */
}
static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev,
struct scatterlist *sg,
int nelems, int direction)
{
BUG_ON(direction == PCI_DMA_NONE);
/* nothing to do */
}
static inline void pci_dma_sync_sg_for_device(struct pci_dev *hwdev,
struct scatterlist *sg,
int nelems, int direction)
{
BUG_ON(direction == PCI_DMA_NONE);
/* nothing to do */
}
/* Return whether the given PCI device DMA address mask can
* be supported properly. For example, if your device can
* only drive the low 24-bits during PCI bus mastering, then
* you would pass 0x00ffffff as the mask to this function.
* We default to supporting only 32 bits DMA unless we have
* an explicit override of this function in pci_dma_ops for
* the platform
*/
static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
if (pci_dma_ops.pci_dma_supported)
return pci_dma_ops.pci_dma_supported(hwdev, mask);
return (mask < 0x100000000ull);
}
extern struct dma_mapping_ops pci_dma_ops;
/* For DAC DMA, we currently don't support it by default, but
* we let the platform override this
*/
static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask)
{
if (pci_dma_ops.pci_dac_dma_supported)
return pci_dma_ops.pci_dac_dma_supported(hwdev, mask);
if (pci_dma_ops.dac_dma_supported)
return pci_dma_ops.dac_dma_supported(&hwdev->dev, mask);
return 0;
}
static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
{
return dma_mapping_error(dma_addr);
}
extern int pci_domain_nr(struct pci_bus *bus);
/* Decide whether to display the domain number in /proc */
......@@ -201,10 +91,6 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP 1
#define pci_map_page(dev, page, off, size, dir) \
pci_map_single(dev, (page_address(page) + (off)), size, dir)
#define pci_unmap_page(dev,addr,sz,dir) pci_unmap_single(dev,addr,sz,dir)
/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
......
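The deleted pci_* inlines are not lost: the <asm-generic/pci-dma-compat.h> include added at the top of this header supplies them, implemented on the generic API. From memory of the generic header, the allocator looks roughly like this; note the hard-coded GFP_ATOMIC, which is why the gfp flag had to reach dma_alloc_coherent in the first place:
static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
		     dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
				  size, dma_handle, GFP_ATOMIC);
}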
......@@ -24,12 +24,9 @@
/* RTAS return status codes */
#define RTAS_BUSY -2 /* RTAS Busy */
#define RTAS_NO_SUCH_INDICATOR -3 /* No such indicator implemented */
#define RTAS_EXTENDED_DELAY_MIN 9900
#define RTAS_EXTENDED_DELAY_MAX 9905
#define RTAS_UNKNOWN_OP -1099 /* Unknown RTAS Token */
/*
* In general to call RTAS use rtas_token("string") to lookup
* an RTAS token for the given string (e.g. "event-scan").
......
......@@ -57,32 +57,7 @@ int vio_get_irq(struct vio_dev *dev);
int vio_enable_interrupts(struct vio_dev *dev);
int vio_disable_interrupts(struct vio_dev *dev);
dma_addr_t vio_map_single(struct vio_dev *dev, void *vaddr,
size_t size, enum dma_data_direction direction);
void vio_unmap_single(struct vio_dev *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction);
int vio_map_sg(struct vio_dev *vdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction);
void vio_unmap_sg(struct vio_dev *vdev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction);
void *vio_alloc_consistent(struct vio_dev *dev, size_t size,
dma_addr_t *dma_handle);
void vio_free_consistent(struct vio_dev *dev, size_t size, void *vaddr,
dma_addr_t dma_handle);
static inline int vio_dma_supported(struct vio_dev *hwdev, u64 mask)
{
return 1;
}
#define vio_map_page(dev, page, off, size, dir) \
vio_map_single(dev, (page_address(page) + (off)), size, dir)
#define vio_unmap_page(dev,addr,sz,dir) vio_unmap_single(dev,addr,sz,dir)
static inline int vio_set_dma_mask(struct vio_dev *dev, u64 mask)
{
return -EIO;
}
extern struct dma_mapping_ops vio_dma_ops;
extern struct bus_type vio_bus_type;
......
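Since the vio_* wrappers are gone from the public header, a VIO driver is expected to go through the generic API on the embedded struct device. A hypothetical conversion (viodev, buf, len are illustrative):
/* before: h = vio_map_single(viodev, buf, len, DMA_TO_DEVICE); */
dma_addr_t h = dma_map_single(&viodev->dev, buf, len, DMA_TO_DEVICE);
/* ... */
dma_unmap_single(&viodev->dev, h, len, DMA_TO_DEVICE);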