Commit 2dcf7afa authored by James Simmons

Merge bk://fbdev.bkbits.net/fbdev-2.5
into kozmo.(none):/usr/src/fbdev-2.5
parents 8306d64a 6936a2a0
VERSION = 2
PATCHLEVEL = 5
SUBLEVEL = 51
SUBLEVEL = 52
EXTRAVERSION =
# *DOCUMENTATION*
......
......@@ -33,7 +33,6 @@
* This table stores the address info for each vector handler.
*/
irq_handler_t irq_list[SYS_IRQS];
unsigned int *mach_kstat_irqs;
#define NUM_IRQ_NODES 16
static irq_node_t nodes[NUM_IRQ_NODES];
......@@ -82,8 +81,6 @@ void __init init_IRQ(void)
if (mach_init_IRQ)
mach_init_IRQ();
mach_kstat_irqs = &kstat.irqs[0][0];
}
irq_node_t *new_irq_node(void)
......@@ -233,7 +230,7 @@ asmlinkage void process_int(unsigned long vec, struct pt_regs *fp)
{
if (vec >= VEC_INT1 && vec <= VEC_INT7) {
vec -= VEC_SPUR;
kstat.irqs[0][vec]++;
kstat_cpu(0).irqs[vec]++;
irq_list[vec].handler(vec, irq_list[vec].dev_id, fp);
} else {
if (mach_process_int)
......@@ -254,7 +251,7 @@ int show_interrupts(struct seq_file *p, void *v)
continue;
seq_printf(p, "%3d: %10u ", i,
(i ? kstat.irqs[0][i] : num_spurious));
(i ? kstat_cpu(0).irqs[i] : num_spurious));
if (irq_list[i].flags & IRQ_FLG_LOCK)
seq_printf(p, "L ");
else
......
......@@ -168,7 +168,7 @@ asmlinkage int m68k_fork(struct pt_regs *regs)
asmlinkage int m68k_vfork(struct pt_regs *regs)
{
struct task_struct *p;
p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL);
p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL, NULL);
return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
......@@ -183,7 +183,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs)
newsp = regs->d2;
if (!newsp)
newsp = rdusp();
p = do_fork(clone_flags & ~CLONE_IDLETASK, newsp, regs, 0, NULL);
p = do_fork(clone_flags & ~CLONE_IDLETASK, newsp, regs, 0, NULL, NULL);
return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
......
......@@ -63,9 +63,11 @@ asmlinkage int do_sigsuspend(struct pt_regs *regs)
sigset_t saveset;
mask &= _BLOCKABLE;
spin_lock_irq(&current->sig->siglock);
saveset = current->blocked;
siginitset(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
regs->d0 = -EINTR;
while (1) {
......@@ -91,9 +93,11 @@ do_rt_sigsuspend(struct pt_regs *regs)
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sig->siglock);
saveset = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
regs->d0 = -EINTR;
while (1) {
......@@ -366,8 +370,10 @@ asmlinkage int do_sigreturn(unsigned long __unused)
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sig->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0))
goto badframe;
......@@ -393,8 +399,10 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused)
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sig->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
if (rt_restore_ucontext(regs, sw, &frame->uc, &d0))
goto badframe;
......@@ -729,10 +737,13 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
if (ka->sa.sa_flags & SA_ONESHOT)
ka->sa.sa_handler = SIG_DFL;
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
if (!(ka->sa.sa_flags & SA_NODEFER)) {
spin_lock_irq(&current->sig->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
sigaddset(&current->blocked,sig);
recalc_sigpending();
recalc_sigpending();
spin_unlock_irq(&current->sig->siglock);
}
}
/*
......
......@@ -241,7 +241,7 @@ ENTRY(sys_call_table)
.long sys_getdents64 /* 220 */
.long sys_fcntl64
.long sys_ni_syscall /* reserved for TUX */
.long sys_security /* reserved for Security */
.long sys_ni_syscall
.long sys_gettid
.long sys_ni_syscall /* 225 */ /* sys_readahead */
.long sys_setxattr
......
......@@ -42,8 +42,8 @@ devclass_attr_store(struct kobject * kobj, struct attribute * attr,
}
static struct sysfs_ops class_sysfs_ops = {
show: devclass_attr_show,
store: devclass_attr_store,
.show = devclass_attr_show,
.store = devclass_attr_store,
};
static struct subsystem class_subsys = {
......
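The hunk above is one of many in this series that convert the old gcc-specific "label:" structure initializers to C99 designated initializers. A minimal sketch of the two forms, using a made-up struct rather than anything from this patch:

struct example_ops {
	int (*show)(void);
	int (*store)(void);
};

static int ex_show(void)  { return 0; }
static int ex_store(void) { return 0; }

/* old GNU extension syntax, being phased out throughout this series */
static struct example_ops old_style = {
	show:  ex_show,
	store: ex_store,
};

/* C99 designated initializers, the replacement */
static struct example_ops new_style = {
	.show  = ex_show,
	.store = ex_store,
};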
......@@ -49,7 +49,7 @@ int __init register_memblk(struct memblk *memblk, int num, struct node *root)
static int __init register_memblk_type(void)
{
driver_register(&memblk_driver);
return devclass_register(&memblk_devclass);
int error = devclass_register(&memblk_devclass);
return error ? error : driver_register(&memblk_driver);
}
postcore_initcall(register_memblk_type);
......@@ -93,7 +93,7 @@ int __init register_node(struct node *node, int num, struct node *parent)
static int __init register_node_type(void)
{
devclass_register(&node_devclass);
return driver_register(&node_driver);
int error = devclass_register(&node_devclass);
return error ? error : driver_register(&node_driver);
}
postcore_initcall(register_node_type);
......@@ -29,8 +29,11 @@ config AGP_GART
bool "/dev/agpgart (AGP Support)"
depends on GART_IOMMU
config AGP3
bool "AGP 3.0 compliance (EXPERIMENTAL)"
config AGP_INTEL
bool "Intel 440LX/BX/GX and I815/I820/I830M/I830MP/I840/I845/I850/I860 support"
tristate "Intel 440LX/BX/GX and I815/I820/I830M/I830MP/I840/I845/I850/I860 support"
depends on AGP
help
This option gives you AGP support for the GLX component of the
......@@ -40,7 +43,7 @@ config AGP_INTEL
use GLX or DRI. If unsure, say N.
#config AGP_I810
# bool "Intel I810/I815/I830M (on-board) support"
# tristate "Intel I810/I815/I830M (on-board) support"
# depends on AGP
# help
# This option gives you AGP support for the Xserver on the Intel 810
......@@ -48,7 +51,7 @@ config AGP_INTEL
# is required to do any useful video modes with these boards.
config AGP_VIA
bool "VIA chipset support"
tristate "VIA chipset support"
depends on AGP
help
This option gives you AGP support for the GLX component of the
......@@ -58,7 +61,7 @@ config AGP_VIA
use GLX or DRI. If unsure, say N.
config AGP_AMD
bool "AMD Irongate, 761, and 762 support"
tristate "AMD Irongate, 761, and 762 support"
depends on AGP
help
This option gives you AGP support for the GLX component of the
......@@ -68,7 +71,7 @@ config AGP_AMD
use GLX or DRI. If unsure, say N.
config AGP_SIS
bool "Generic SiS support"
tristate "Generic SiS support"
depends on AGP
help
This option gives you AGP support for the GLX component of the "soon
......@@ -81,7 +84,7 @@ config AGP_SIS
use GLX or DRI. If unsure, say N.
config AGP_ALI
bool "ALI chipset support"
tristate "ALI chipset support"
depends on AGP
---help---
This option gives you AGP support for the GLX component of the
......@@ -99,14 +102,14 @@ config AGP_ALI
use GLX or DRI. If unsure, say N.
config AGP_SWORKS
bool "Serverworks LE/HE support"
tristate "Serverworks LE/HE support"
depends on AGP
help
Say Y here to support the Serverworks AGP card. See
<http://www.serverworks.com/> for product descriptions and images.
config AGP_AMD_8151
bool "AMD 8151 support"
tristate "AMD 8151 support"
depends on AGP
default GART_IOMMU
help
......@@ -114,16 +117,28 @@ config AGP_AMD_8151
GART on the AMD Athlon64/Opteron ("Hammer") CPUs.
config AGP_I460
bool "Intel 460GX support"
tristate "Intel 460GX support"
depends on AGP && IA64
help
This option gives you AGP GART support for the Intel 460GX chipset
for IA64 processors.
config AGP_HP_ZX1
bool "HP ZX1 AGP support"
tristate "HP ZX1 AGP support"
depends on AGP && IA64
help
This option gives you AGP GART support for the HP ZX1 chipset
for IA64 processors.
# Put AGP 3.0 entries below here.
config AGP_I7505
tristate "Intel 7205/7505 support (AGP 3.0)"
depends on AGP3
help
This option gives you AGP support for the GLX component of the
XFree86 4.x on Intel I7505 chipsets.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI. If unsure, say N.
......@@ -8,6 +8,7 @@ export-objs := backend.o
agpgart-y := backend.o frontend.o generic.o
agpgart-objs := $(agpgart-y)
obj-$(CONFIG_AGP) += agpgart.o
obj-$(CONFIG_AGP3) += generic-3.0.o
obj-$(CONFIG_AGP_INTEL) += intel-agp.o
obj-$(CONFIG_AGP_VIA) += via-agp.o
......@@ -19,3 +20,5 @@ obj-$(CONFIG_AGP_I460) += i460-agp.o
obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
obj-$(CONFIG_AGP_AMD_8151) += amd-k8-agp.o
obj-$(CONFIG_AGP_I7x05) += i7x05-agp.o
......@@ -46,28 +46,6 @@ int agp_generic_suspend(void);
void agp_generic_resume(void);
void agp_free_key(int key);
/* chipset specific init routines. */
/*
int __init ali_generic_setup (struct pci_dev *pdev);
int __init amd_irongate_setup (struct pci_dev *pdev);
int __init amd_8151_setup (struct pci_dev *pdev);
int __init hp_zx1_setup (struct pci_dev *pdev);
int __init intel_i460_setup (struct pci_dev *pdev);
int __init intel_generic_setup (struct pci_dev *pdev);
int __init intel_i810_setup(struct pci_dev *i810_dev);
int __init intel_815_setup(struct pci_dev *pdev);
int __init intel_i830_setup(struct pci_dev *i830_dev);
int __init intel_820_setup (struct pci_dev *pdev);
int __init intel_830mp_setup (struct pci_dev *pdev);
int __init intel_840_setup (struct pci_dev *pdev);
int __init intel_845_setup (struct pci_dev *pdev);
int __init intel_850_setup (struct pci_dev *pdev);
int __init intel_860_setup (struct pci_dev *pdev);
int __init serverworks_setup (struct pci_dev *pdev);
int __init sis_generic_setup (struct pci_dev *pdev);
int __init via_generic_setup (struct pci_dev *pdev);
*/
#define PFX "agpgart: "
int agp_register_driver (struct pci_dev *dev);
......
......@@ -138,8 +138,8 @@ static int amd_create_gatt_table(void)
return retval;
}
agp_bridge.gatt_table_real = (u32 *)page_dir.real;
agp_bridge.gatt_table = (u32 *)page_dir.remapped;
agp_bridge.gatt_table_real = (unsigned long *)page_dir.real;
agp_bridge.gatt_table = (unsigned long *)page_dir.remapped;
agp_bridge.gatt_bus_addr = virt_to_phys(page_dir.real);
/* Get the address for the gart region.
......@@ -165,8 +165,8 @@ static int amd_free_gatt_table(void)
{
struct amd_page_map page_dir;
page_dir.real = (u32 *)agp_bridge.gatt_table_real;
page_dir.remapped = (u32 *)agp_bridge.gatt_table;
page_dir.real = (unsigned long *)agp_bridge.gatt_table_real;
page_dir.remapped = (unsigned long *)agp_bridge.gatt_table;
amd_free_gatt_pages();
amd_free_page_map(&page_dir);
......
......@@ -151,7 +151,7 @@ static int amd_x86_64_fetch_size(void)
}
static void inline flush_x86_64_tlb(struct pci_dev *dev)
static void flush_x86_64_tlb(struct pci_dev *dev)
{
u32 tmp;
......
......@@ -36,8 +36,11 @@
#include <linux/agp_backend.h>
#include "agp.h"
#define AGPGART_VERSION_MAJOR 1
#define AGPGART_VERSION_MINOR 0
/* Due to XFree86 brain-damage, we can't go to 1.0 until they
* fix some real stupidity. It's only by chance we can bump
* past 0.99 at all due to some boolean logic error. */
#define AGPGART_VERSION_MAJOR 0
#define AGPGART_VERSION_MINOR 100
struct agp_bridge_data agp_bridge = { .type = NOT_SUPPORTED };
......@@ -258,7 +261,7 @@ int agp_register_driver (struct pci_dev *dev)
return 0;
}
int __exit agp_unregister_driver(void)
int agp_unregister_driver(void)
{
agp_bridge.type = NOT_SUPPORTED;
pm_unregister_all(agp_power);
......@@ -269,8 +272,23 @@ int __exit agp_unregister_driver(void)
return 0;
}
int __exit agp_exit(void)
{
if (agp_count==0)
return -EBUSY;
return 0;
}
int __init agp_init(void)
{
static int already_initialised=0;
if (already_initialised!=0)
return 0;
already_initialised = 1;
memset(&agp_bridge, 0, sizeof(struct agp_bridge_data));
agp_bridge.type = NOT_SUPPORTED;
......@@ -281,11 +299,13 @@ int __init agp_init(void)
#ifndef CONFIG_GART_IOMMU
module_init(agp_init);
module_exit(agp_exit);
#endif
EXPORT_SYMBOL(agp_backend_acquire);
EXPORT_SYMBOL(agp_backend_release);
EXPORT_SYMBOL_GPL(agp_register_driver);
EXPORT_SYMBOL_GPL(agp_unregister_driver);
MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
MODULE_LICENSE("GPL and additional rights");
......@@ -1062,9 +1062,9 @@ static struct file_operations agp_fops =
static struct miscdevice agp_miscdev =
{
AGPGART_MINOR,
"agpgart",
&agp_fops
.minor = AGPGART_MINOR,
.name = "agpgart",
.fops = &agp_fops
};
int __init agp_frontend_initialize(void)
......@@ -1079,7 +1079,7 @@ int __init agp_frontend_initialize(void)
return 0;
}
void __exit agp_frontend_cleanup(void)
void agp_frontend_cleanup(void)
{
misc_deregister(&agp_miscdev);
}
......
......@@ -469,7 +469,7 @@ int agp_generic_create_gatt_table(void)
for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
SetPageReserved(page);
agp_bridge.gatt_table_real = (u32 *) table;
agp_bridge.gatt_table_real = (unsigned long *) table;
agp_gatt_table = (void *)table;
CACHE_FLUSH();
agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
......@@ -693,7 +693,23 @@ void agp_enable(u32 mode)
EXPORT_SYMBOL(agp_free_memory);
EXPORT_SYMBOL(agp_allocate_memory);
EXPORT_SYMBOL(agp_copy_info);
EXPORT_SYMBOL(agp_create_memory);
EXPORT_SYMBOL(agp_bind_memory);
EXPORT_SYMBOL(agp_unbind_memory);
EXPORT_SYMBOL(agp_free_key);
EXPORT_SYMBOL(agp_enable);
EXPORT_SYMBOL(agp_bridge);
EXPORT_SYMBOL(agp_generic_alloc_page);
EXPORT_SYMBOL(agp_generic_destroy_page);
EXPORT_SYMBOL(agp_generic_suspend);
EXPORT_SYMBOL(agp_generic_resume);
EXPORT_SYMBOL(agp_generic_agp_enable);
EXPORT_SYMBOL(agp_generic_create_gatt_table);
EXPORT_SYMBOL(agp_generic_free_gatt_table);
EXPORT_SYMBOL(agp_generic_insert_memory);
EXPORT_SYMBOL(agp_generic_remove_memory);
EXPORT_SYMBOL(agp_generic_alloc_by_type);
EXPORT_SYMBOL(agp_generic_free_by_type);
EXPORT_SYMBOL(global_cache_flush);
......@@ -18,8 +18,7 @@
#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL
#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> \
hp_private.io_tlb_shift)
#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift)
static struct aper_size_info_fixed hp_zx1_sizes[] =
{
......@@ -330,12 +329,7 @@ static unsigned long hp_zx1_mask_memory(unsigned long addr, int type)
return HP_ZX1_PDIR_VALID_BIT | addr;
}
static unsigned long hp_zx1_unmask_memory(unsigned long addr)
{
return addr & ~(HP_ZX1_PDIR_VALID_BIT);
}
int __init hp_zx1_setup (struct pci_dev *pdev)
int __init hp_zx1_setup (struct pci_dev *pdev __attribute__((unused)))
{
agp_bridge.masks = hp_zx1_masks;
agp_bridge.num_of_masks = 1;
......@@ -347,7 +341,6 @@ int __init hp_zx1_setup (struct pci_dev *pdev)
agp_bridge.cleanup = hp_zx1_cleanup;
agp_bridge.tlb_flush = hp_zx1_tlbflush;
agp_bridge.mask_memory = hp_zx1_mask_memory;
agp_bridge.unmask_memory = hp_zx1_unmask_memory;
agp_bridge.agp_enable = agp_generic_agp_enable;
agp_bridge.cache_flush = global_cache_flush;
agp_bridge.create_gatt_table = hp_zx1_create_gatt_table;
......@@ -375,8 +368,6 @@ static int __init agp_find_supported_device(struct pci_dev *dev)
return hp_zx1_setup(dev);
}
return -ENODEV;
}
static int agp_hp_probe (struct pci_dev *dev, const struct pci_device_id *ent)
{
......
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include "agp.h"
static int intel_7505_fetch_size(void)
{
int i;
u16 tmp;
aper_size_info_16 *values;
/*
* For AGP 3.0 APSIZE is now 16 bits
*/
pci_read_config_word (agp_bridge.dev, INTEL_I7505_APSIZE, &tmp);
tmp = (tmp & 0xfff);
values = A_SIZE_16(agp_bridge.aperture_sizes);
for (i=0; i < agp_bridge.num_aperture_sizes; i++) {
if (tmp == values[i].size_value) {
agp_bridge.previous_size = agp_bridge.current_size =
(void *)(values + i);
agp_bridge.aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
static void intel_7505_tlbflush(agp_memory *mem)
{
u32 temp;
pci_read_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, &temp);
pci_write_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, temp & ~(1 << 7));
pci_read_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, &temp);
pci_write_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, temp | (1 << 7));
}
static void intel_7505_cleanup(void)
{
aper_size_info_16 *previous_size;
previous_size = A_SIZE_16(agp_bridge.previous_size);
pci_write_config_byte(agp_bridge.dev, INTEL_I7505_APSIZE,
previous_size->size_value);
}
static int intel_7505_configure(void)
{
u32 temp;
aper_size_info_16 *current_size;
current_size = A_SIZE_16(agp_bridge.current_size);
/* aperture size */
pci_write_config_word(agp_bridge.dev, INTEL_I7505_APSIZE,
current_size->size_value);
/* address to map to */
pci_read_config_dword(agp_bridge.dev, INTEL_I7505_NAPBASELO, &temp);
agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
/* attbase */
pci_write_config_dword(agp_bridge.dev, INTEL_I7505_ATTBASE,
agp_bridge.gatt_bus_addr);
/* agpctrl */
pci_write_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, 0x0000);
/* clear error registers */
pci_write_config_byte(agp_bridge.dev, INTEL_I7505_ERRSTS, 0xff);
return 0;
}
static aper_size_info_16 intel_7505_sizes[7] =
{
{256, 65536, 6, 0xf00},
{128, 32768, 5, 0xf20},
{64, 16384, 4, 0xf30},
{32, 8192, 3, 0xf38},
{16, 4096, 2, 0xf3c},
{8, 2048, 1, 0xf3e},
{4, 1024, 0, 0xf3f}
};
static int __init intel_7505_setup (struct pci_dev *pdev)
{
agp_bridge.masks = intel_generic_masks;
agp_bridge.num_of_masks = 1;
agp_bridge.aperture_sizes = (void *) intel_7505_sizes;
agp_bridge.size_type = U16_APER_SIZE;
agp_bridge.num_aperture_sizes = 7;
agp_bridge.dev_private_data = NULL;
agp_bridge.needs_scratch_page = FALSE;
agp_bridge.configure = intel_7505_configure;
agp_bridge.fetch_size = intel_7505_fetch_size;
agp_bridge.cleanup = intel_7505_cleanup;
agp_bridge.tlb_flush = intel_7505_tlbflush;
agp_bridge.mask_memory = intel_mask_memory;
agp_bridge.agp_enable = agp_generic_agp_3_0_enable;
agp_bridge.cache_flush = global_cache_flush;
agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
agp_bridge.insert_memory = agp_generic_insert_memory;
agp_bridge.remove_memory = agp_generic_remove_memory;
agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
agp_bridge.free_by_type = agp_generic_free_by_type;
agp_bridge.agp_alloc_page = agp_generic_alloc_page;
agp_bridge.agp_destroy_page = agp_generic_destroy_page;
agp_bridge.suspend = agp_generic_suspend;
agp_bridge.resume = agp_generic_resume;
agp_bridge.cant_use_aperture = 0;
return 0;
}
struct agp_device_ids i7x05_agp_device_ids[] __initdata =
{
{
.device_id = PCI_DEVICE_ID_INTEL_7505_0,
.chipset = INTEL_I7505,
.chipset_name = "i7505",
},
{
.device_id = PCI_DEVICE_ID_INTEL_7205_0,
.chipset = INTEL_I7505,
.chipset_name = "i7205",
},
{ }, /* dummy final entry, always present */
};
/* scan table above for supported devices */
static int __init agp_lookup_host_bridge (struct pci_dev *pdev)
{
int j=0;
struct agp_device_ids *devs;
devs = i7x05_agp_device_ids;
while (devs[j].chipset_name != NULL) {
if (pdev->device == devs[j].device_id) {
printk (KERN_INFO PFX "Detected Intel %s chipset\n",
devs[j].chipset_name);
agp_bridge.type = devs[j].chipset;
if (devs[j].chipset_setup != NULL)
return devs[j].chipset_setup(pdev);
else
return intel_7505_setup(pdev);
}
j++;
}
printk(KERN_ERR PFX "Unsupported Intel chipset (device id: %04x),",
pdev->device);
return -ENODEV;
}
static int __init agp_find_supported_device(struct pci_dev *dev)
{
agp_bridge.dev = dev;
if (pci_find_capability(dev, PCI_CAP_ID_AGP)==0)
return -ENODEV;
/* probe for known chipsets */
return agp_lookup_host_bridge(dev);
}
static int agp_i7x05_probe (struct pci_dev *dev, const struct pci_device_id *ent)
{
if (agp_find_supported_device(dev) == 0) {
agp_register_driver(dev);
return 0;
}
return -ENODEV;
}
static struct pci_device_id agp_i7x05_pci_table[] __initdata = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ }
};
MODULE_DEVICE_TABLE(pci, agp_i7x05_pci_table);
static struct pci_driver agp_i7x05_pci_driver = {
.name = "agpgart-i7x05",
.id_table = agp_i7x05_pci_table,
.probe = agp_i7x05_probe,
};
int __init agp_i7x05_init(void)
{
int ret_val;
ret_val = pci_module_init(&agp_i7x05_pci_driver);
if (ret_val)
agp_bridge.type = NOT_SUPPORTED;
return ret_val;
}
static void __exit agp_i7x05_cleanup(void)
{
agp_unregister_driver();
pci_unregister_driver(&agp_i7x05_pci_driver);
}
module_init(agp_i7x05_init);
module_exit(agp_i7x05_cleanup);
MODULE_AUTHOR("Matthew E Tolentino <matthew.e.tolentino@intel.com>");
MODULE_LICENSE("GPL and additional rights");
......@@ -1473,6 +1473,11 @@ static struct pci_driver agp_intel_pci_driver = {
static int __init agp_intel_init(void)
{
int ret_val;
static int agp_initialised=0;
if (agp_initialised==1)
return 0;
agp_initialised=1;
ret_val = pci_module_init(&agp_intel_pci_driver);
if (ret_val)
......
......@@ -28,16 +28,16 @@
#define DRIVER_FOPS \
static struct file_operations DRM(fops) = { \
owner: THIS_MODULE, \
open: DRM(open), \
flush: DRM(flush), \
release: DRM(release), \
ioctl: DRM(ioctl), \
mmap: DRM(mmap), \
read: DRM(read), \
fasync: DRM(fasync), \
poll: DRM(poll), \
get_unmapped_area: ffb_get_unmapped_area, \
.owner = THIS_MODULE, \
.open = DRM(open), \
.flush = DRM(flush), \
.release = DRM(release), \
.ioctl = DRM(ioctl), \
.mmap = DRM(mmap), \
.read = DRM(read), \
.fasync = DRM(fasync), \
.poll = DRM(poll), \
.get_unmapped_area = ffb_get_unmapped_area, \
}
#define DRIVER_COUNT_CARDS() ffb_count_card_instances()
......
......@@ -96,13 +96,13 @@ static ssize_t zft_write(struct file *fp, const char *buff,
static struct file_operations zft_cdev =
{
owner: THIS_MODULE,
read: zft_read,
write: zft_write,
ioctl: zft_ioctl,
mmap: zft_mmap,
open: zft_open,
release: zft_close,
.owner = THIS_MODULE,
.read = zft_read,
.write = zft_write,
.ioctl = zft_ioctl,
.mmap = zft_mmap,
.open = zft_open,
.release = zft_close,
};
/* Open floppy tape device
......
......@@ -429,12 +429,12 @@ static int register_serial_portandirq(unsigned int port, int irq)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
static struct file_operations mwave_fops = {
owner:THIS_MODULE,
read:mwave_read,
write:mwave_write,
ioctl:mwave_ioctl,
open:mwave_open,
release:mwave_close
.owner = THIS_MODULE,
.read = mwave_read,
.write = mwave_write,
.ioctl = mwave_ioctl,
.open = mwave_open,
.release = mwave_close
};
#else
static struct file_operations mwave_fops = {
......
......@@ -276,8 +276,8 @@ static struct real_driver rio_real_driver = {
*/
static struct file_operations rio_fw_fops = {
owner: THIS_MODULE,
ioctl: rio_fw_ioctl,
.owner = THIS_MODULE,
.ioctl = rio_fw_ioctl,
};
struct miscdevice rio_fw_device = {
......
......@@ -714,11 +714,11 @@ static int __devinit sonypi_probe(struct pci_dev *pcidev) {
SONYPI_DRIVER_MAJORVERSION,
SONYPI_DRIVER_MINORVERSION);
printk(KERN_INFO "sonypi: detected %s model, "
"verbose = %s, fnkeyinit = %s, camera = %s, "
"verbose = %d, fnkeyinit = %s, camera = %s, "
"compat = %s, mask = 0x%08lx\n",
(sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) ?
"type1" : "type2",
verbose ? "on" : "off",
verbose,
fnkeyinit ? "on" : "off",
camera ? "on" : "off",
compat ? "on" : "off",
......
......@@ -37,7 +37,7 @@
#ifdef __KERNEL__
#define SONYPI_DRIVER_MAJORVERSION 1
#define SONYPI_DRIVER_MINORVERSION 16
#define SONYPI_DRIVER_MINORVERSION 17
#define SONYPI_DEVICE_MODEL_TYPE1 1
#define SONYPI_DEVICE_MODEL_TYPE2 2
......@@ -171,6 +171,13 @@ struct sonypi_event {
u8 data;
u8 event;
};
/* The set of possible button release events */
static struct sonypi_event sonypi_releaseev[] = {
{ 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED },
{ 0, 0 }
};
/* The set of possible jogger events */
static struct sonypi_event sonypi_joggerev[] = {
{ 0x1f, SONYPI_EVENT_JOGDIAL_UP },
......@@ -186,7 +193,6 @@ static struct sonypi_event sonypi_joggerev[] = {
{ 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED },
{ 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED },
{ 0x40, SONYPI_EVENT_JOGDIAL_PRESSED },
{ 0x00, SONYPI_EVENT_JOGDIAL_RELEASED },
{ 0, 0 }
};
......@@ -195,7 +201,6 @@ static struct sonypi_event sonypi_captureev[] = {
{ 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED },
{ 0x07, SONYPI_EVENT_CAPTURE_PRESSED },
{ 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED },
{ 0x00, SONYPI_EVENT_CAPTURE_RELEASED },
{ 0, 0 }
};
......@@ -293,6 +298,7 @@ struct sonypi_eventtypes {
unsigned long mask;
struct sonypi_event * events;
} sonypi_eventtypes[] = {
{ SONYPI_DEVICE_MODEL_TYPE1, 0, 0xffffffff, sonypi_releaseev },
{ SONYPI_DEVICE_MODEL_TYPE1, 0x70, SONYPI_MEYE_MASK, sonypi_meyeev },
{ SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_LID_MASK, sonypi_lidev },
{ SONYPI_DEVICE_MODEL_TYPE1, 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev },
......@@ -301,6 +307,7 @@ struct sonypi_eventtypes {
{ SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
{ SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0, 0xffffffff, sonypi_releaseev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x38, SONYPI_LID_MASK, sonypi_lidev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_JOGGER_MASK, sonypi_joggerev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_CAPTURE_MASK, sonypi_captureev },
......
......@@ -139,7 +139,7 @@ static int softdog_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
static struct watchdog_info ident = {
identity: "Software Watchdog",
.identity = "Software Watchdog",
};
switch (cmd) {
default:
......@@ -158,17 +158,17 @@ static int softdog_ioctl(struct inode *inode, struct file *file,
}
static struct file_operations softdog_fops = {
owner: THIS_MODULE,
write: softdog_write,
ioctl: softdog_ioctl,
open: softdog_open,
release: softdog_release,
.owner = THIS_MODULE,
.write = softdog_write,
.ioctl = softdog_ioctl,
.open = softdog_open,
.release = softdog_release,
};
static struct miscdevice softdog_miscdev = {
minor: WATCHDOG_MINOR,
name: "watchdog",
fops: &softdog_fops,
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &softdog_fops,
};
static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.06, soft_margin: %d sec, nowayout: %d\n";
......
......@@ -17,10 +17,10 @@ static int cs5520_get_info(char *, char **, off_t, int);
static ide_pci_host_proc_t cs5520_procs[] __initdata = {
{
name: "cs5520",
set: 1,
get_info: cs5520_get_info,
parent: NULL,
.name = "cs5520",
.set = 1,
.get_info = cs5520_get_info,
.parent = NULL,
},
};
#endif /* defined(DISPLAY_CS5520_TIMINGS) && defined(CONFIG_PROC_FS) */
......@@ -31,32 +31,32 @@ static void cs5520_init_setup_dma(struct pci_dev *dev, struct ide_pci_device_s *
static ide_pci_device_t cyrix_chipsets[] __devinitdata = {
{
vendor: PCI_VENDOR_ID_CYRIX,
device: PCI_DEVICE_ID_CYRIX_5510,
name: "Cyrix 5510",
init_chipset: init_chipset_cs5520,
init_setup_dma: cs5520_init_setup_dma,
init_iops: NULL,
init_hwif: init_hwif_cs5520,
isa_ports: 1,
channels: 2,
autodma: AUTODMA,
bootable: ON_BOARD,
extra: 0,
.vendor = PCI_VENDOR_ID_CYRIX,
.device = PCI_DEVICE_ID_CYRIX_5510,
.name = "Cyrix 5510",
.init_chipset = init_chipset_cs5520,
.init_setup_dma = cs5520_init_setup_dma,
.init_iops = NULL,
.init_hwif = init_hwif_cs5520,
.isa_ports = 1,
.channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
.extra = 0,
},
{
vendor: PCI_VENDOR_ID_CYRIX,
device: PCI_DEVICE_ID_CYRIX_5520,
name: "Cyrix 5520",
init_chipset: init_chipset_cs5520,
init_setup_dma: cs5520_init_setup_dma,
init_iops: NULL,
init_hwif: init_hwif_cs5520,
isa_ports: 1,
channels: 2,
autodma: AUTODMA,
bootable: ON_BOARD,
extra: 0,
.vendor = PCI_VENDOR_ID_CYRIX,
.device = PCI_DEVICE_ID_CYRIX_5520,
.name = "Cyrix 5520",
.init_chipset = init_chipset_cs5520,
.init_setup_dma = cs5520_init_setup_dma,
.init_iops = NULL,
.init_hwif = init_hwif_cs5520,
.isa_ports = 1,
.channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
.extra = 0,
}
};
......
......@@ -17,10 +17,10 @@ static int sc1200_get_info(char *, char **, off_t, int);
static ide_pci_host_proc_t sc1200_procs[] __initdata = {
{
name: "sc1200",
set: 1,
get_info: sc1200_get_info,
parent: NULL,
.name = "sc1200",
.set = 1,
.get_info = sc1200_get_info,
.parent = NULL,
},
};
#endif /* DISPLAY_SC1200_TIMINGS && CONFIG_PROC_FS */
......@@ -31,23 +31,23 @@ static void init_dma_sc1200(ide_hwif_t *, unsigned long);
static ide_pci_device_t sc1200_chipsets[] __devinitdata = {
{ /* 0 */
vendor: PCI_VENDOR_ID_NS,
device: PCI_DEVICE_ID_NS_SCx200_IDE,
name: "SC1200",
init_chipset: init_chipset_sc1200,
init_iops: NULL,
init_hwif: init_hwif_sc1200,
init_dma: init_dma_sc1200,
channels: 2,
autodma: AUTODMA,
enablebits: {{0x00,0x00,0x00}, {0x00,0x00,0x00}},
bootable: ON_BOARD,
extra: 0,
.vendor = PCI_VENDOR_ID_NS,
.device = PCI_DEVICE_ID_NS_SCx200_IDE,
.name = "SC1200",
.init_chipset = init_chipset_sc1200,
.init_iops = NULL,
.init_hwif = init_hwif_sc1200,
.init_dma = init_dma_sc1200,
.channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x00,0x00,0x00}, {0x00,0x00,0x00}},
.bootable = ON_BOARD,
.extra = 0,
},{
vendor: 0,
device: 0,
channels: 0,
bootable: EOL,
.vendor = 0,
.device = 0,
.channels = 0,
.bootable = EOL,
}
};
......
......@@ -5,7 +5,7 @@ menu "IEEE 1394 (FireWire) support (EXPERIMENTAL)"
config IEEE1394
tristate "IEEE 1394 (FireWire) support (EXPERIMENTAL)"
---help---
help
IEEE 1394 describes a high performance serial bus, which is also
known as FireWire(tm) or i.Link(tm) and is used for connecting all
sorts of devices (most notably digital video cameras) to your
......@@ -20,6 +20,36 @@ config IEEE1394
say M here and read <file:Documentation/modules.txt>. The module
will be called ieee1394.o.
comment "Subsystem Options"
depends on IEEE1394
config IEEE1394_VERBOSEDEBUG
bool "Excessive debugging output"
depends on IEEE1394
help
If you say Y here, you will get very verbose debugging logs from
the subsystem which includes a dump of the header of every sent
and received packet. This can amount to a high amount of data
collected in a very short time which is usually also saved to
disk by the system logging daemons.
Say Y if you really want or need the debugging output, everyone
else says N.
config IEEE1394_OUI_DB
bool "OUI Database built-in"
depends on IEEE1394
help
If you say Y here, then an OUI list (vendor unique ID's) will be
compiled into the ieee1394 module. This doesn't really do much
accept being able to display the vendor of a hardware node. The
downside is that it adds about 300k to the size of the module,
or kernel (depending on whether you compile ieee1394 as a
module, or static in the kernel).
This option is not needed for userspace programs like gscanbus
to show this information.
comment "Device Drivers"
depends on IEEE1394
......@@ -47,7 +77,7 @@ config IEEE1394_PCILYNX
config IEEE1394_OHCI1394
tristate "OHCI-1394 support"
depends on IEEE1394
---help---
help
Enable this driver if you have an IEEE 1394 controller based on the
OHCI-1394 specification. The current driver is only tested with OHCI
chipsets made by Texas Instruments and NEC. Most third-party vendors
......@@ -64,7 +94,7 @@ comment "Protocol Drivers"
config IEEE1394_VIDEO1394
tristate "OHCI-1394 Video support"
depends on IEEE1394_OHCI1394
depends on IEEE1394 && IEEE1394_OHCI1394
help
This option enables video device usage for OHCI-1394 cards. Enable
this option only if you have an IEEE 1394 video device connected to
......@@ -72,14 +102,14 @@ config IEEE1394_VIDEO1394
config IEEE1394_SBP2
tristate "SBP-2 support (Harddisks etc.)"
depends on SCSI && IEEE1394
depends on IEEE1394 && SCSI
help
This option enables you to use SBP-2 devices connected to your IEEE
1394 bus. SBP-2 devices include harddrives and DVD devices.
config IEEE1394_SBP2_PHYS_DMA
bool "Enable Phys DMA support for SBP2 (Debug)"
depends on IEEE1394_SBP2
depends on IEEE1394 && IEEE1394_SBP2
config IEEE1394_ETH1394
tristate "Ethernet over 1394"
......@@ -90,8 +120,8 @@ config IEEE1394_ETH1394
config IEEE1394_DV1394
tristate "OHCI-DV I/O support"
depends on IEEE1394_OHCI1394
---help---
depends on IEEE1394 && IEEE1394_OHCI1394
help
This driver allows you to transmit and receive DV (digital video)
streams on an OHCI-1394 card using a simple frame-oriented
interface.
......@@ -131,8 +161,8 @@ config IEEE1394_CMP
config IEEE1394_AMDTP
tristate "IEC61883-6 (Audio transmission) support"
depends on IEEE1394_OHCI1394 && IEEE1394_CMP
---help---
depends on IEEE1394 && IEEE1394_OHCI1394 && IEEE1394_CMP
help
This option enables the Audio & Music Data Transmission Protocol
(IEC61883-6) driver, which implements audio transmission over
IEEE1394.
......@@ -144,18 +174,4 @@ config IEEE1394_AMDTP
say M here and read <file:Documentation/modules.txt>. The module
will be called amdtp.o.
config IEEE1394_VERBOSEDEBUG
bool "Excessive debugging output"
depends on IEEE1394
help
If you say Y here, you will get very verbose debugging logs from the
subsystem which includes a dump of the header of every sent and
received packet. This can amount to a high amount of data collected
in a very short time which is usually also saved to disk by the
system logging daemons.
Say Y if you really want or need the debugging output, everyone else
says N.
endmenu
......@@ -5,7 +5,7 @@
export-objs := ieee1394_core.o ohci1394.o cmp.o
ieee1394-objs := ieee1394_core.o ieee1394_transactions.o hosts.o \
highlevel.o csr.o nodemgr.o
highlevel.o csr.o nodemgr.o oui.o dma.o iso.o
obj-$(CONFIG_IEEE1394) += ieee1394.o
obj-$(CONFIG_IEEE1394_PCILYNX) += pcilynx.o
......@@ -17,3 +17,14 @@ obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o
obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o
obj-$(CONFIG_IEEE1394_AMDTP) += amdtp.o
obj-$(CONFIG_IEEE1394_CMP) += cmp.o
clean-files := oui.c
ifeq ($(obj),)
obj = .
endif
$(obj)/oui.o: $(obj)/oui.c
$(obj)/oui.c: $(obj)/oui.db $(obj)/oui2c.sh
$(CONFIG_SHELL) $(obj)/oui2c.sh < $(obj)/oui.db > $(obj)/oui.c
......@@ -688,7 +688,7 @@ static u32 get_header_bits(struct stream *s, int sub_frame, u32 sample)
return get_iec958_header_bits(s, sub_frame, sample);
case AMDTP_FORMAT_RAW:
return 0x40000000;
return 0x40;
default:
return 0;
......@@ -833,8 +833,9 @@ static int stream_alloc_packet_lists(struct stream *s)
max_nevents = fraction_ceil(&s->samples_per_cycle);
max_packet_size = max_nevents * s->dimension * 4 + 8;
s->packet_pool = pci_pool_create("packet pool", s->host->ohci->dev,
max_packet_size, 0, 0);
s->packet_pool = hpsb_pci_pool_create("packet pool", s->host->ohci->dev,
max_packet_size, 0, 0 ,SLAB_KERNEL);
if (s->packet_pool == NULL)
return -1;
......@@ -1018,9 +1019,10 @@ struct stream *stream_alloc(struct amdtp_host *host)
return NULL;
}
s->descriptor_pool = pci_pool_create("descriptor pool", host->ohci->dev,
s->descriptor_pool = hpsb_pci_pool_create("descriptor pool", host->ohci->dev,
sizeof(struct descriptor_block),
16, 0);
16, 0 ,SLAB_KERNEL);
if (s->descriptor_pool == NULL) {
kfree(s->input);
kfree(s);
......@@ -1107,7 +1109,7 @@ static ssize_t amdtp_write(struct file *file, const char *buffer, size_t count,
*/
for (i = 0; i < count; i += length) {
p = buffer_put_bytes(s->input, count, &length);
p = buffer_put_bytes(s->input, count - i, &length);
copy_from_user(p, buffer + i, length);
if (s->input->length < s->input->size)
continue;
......@@ -1210,7 +1212,7 @@ static void amdtp_add_host(struct hpsb_host *host)
if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME) != 0)
return;
ah = kmalloc(sizeof *ah, SLAB_KERNEL);
ah = kmalloc(sizeof *ah, in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL);
ah->host = host;
ah->ohci = host->hostdata;
INIT_LIST_HEAD(&ah->stream_list);
......
......@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include "hosts.h"
#include "highlevel.h"
......@@ -158,7 +159,7 @@ static void cmp_add_host(struct hpsb_host *host)
{
struct cmp_host *ch;
ch = kmalloc(sizeof *ch, SLAB_KERNEL);
ch = kmalloc(sizeof *ch, in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL);
if (ch == NULL) {
HPSB_ERR("Failed to allocate cmp_host");
return;
......
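The amdtp and cmp hunks above replace plain SLAB_KERNEL allocations with a flag chosen at run time, since the add_host callbacks may now be reached from interrupt context where sleeping allocations are not allowed. A minimal sketch of the pattern, assuming the 2.5-era SLAB_ATOMIC/SLAB_KERNEL aliases for GFP_ATOMIC/GFP_KERNEL; the helper name is invented and not part of the patch:

#include <linux/slab.h>
#include <linux/interrupt.h>

/* hypothetical helper illustrating the allocation pattern used above */
static void *hostinfo_alloc(size_t size)
{
	/* never sleep when called from interrupt context */
	return kmalloc(size, in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL);
}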
......@@ -28,7 +28,7 @@
/* Module Parameters */
/* this module parameter can be used to disable mapping of the FCP registers */
MODULE_PARM(fcp,"i");
MODULE_PARM_DESC(fcp, "FCP-registers");
MODULE_PARM_DESC(fcp, "Map FCP registers (default = 1, disable = 0).");
static int fcp = 1;
static u16 csr_crc16(unsigned *data, int length)
......@@ -54,8 +54,15 @@ static void host_reset(struct hpsb_host *host)
host->csr.bus_manager_id = 0x3f;
host->csr.bandwidth_available = 4915;
host->csr.channels_available_hi = ~0;
host->csr.channels_available_hi = 0xfffffffe; /* pre-alloc ch 31 per 1394a-2000 */
host->csr.channels_available_lo = ~0;
host->csr.broadcast_channel = 0x80000000 | 31;
if (host->is_irm) {
if (host->driver->hw_csr_reg) {
host->driver->hw_csr_reg(host, 2, 0xfffffffe, ~0);
}
}
host->csr.node_ids = host->node_id << 16;
......@@ -95,8 +102,15 @@ static void add_host(struct hpsb_host *host)
host->csr.bus_time = 0;
host->csr.bus_manager_id = 0x3f;
host->csr.bandwidth_available = 4915;
host->csr.channels_available_hi = ~0;
host->csr.channels_available_hi = 0xfffffffe; /* pre-alloc ch 31 per 1394a-2000 */
host->csr.channels_available_lo = ~0;
host->csr.broadcast_channel = 0x80000000 | 31;
if (host->is_irm) {
if (host->driver->hw_csr_reg) {
host->driver->hw_csr_reg(host, 2, 0xfffffffe, ~0);
}
}
}
int hpsb_update_config_rom(struct hpsb_host *host, const quadlet_t *new_rom,
......@@ -268,6 +282,10 @@ static int read_regs(struct hpsb_host *host, int nodeid, quadlet_t *buf,
*(buf++) = cpu_to_be32(ret);
out;
case CSR_BROADCAST_CHANNEL:
*(buf++) = cpu_to_be32(host->csr.broadcast_channel);
out;
/* address gap to end - fall through to default */
default:
return RCODE_ADDRESS_ERROR;
......@@ -345,6 +363,12 @@ static int write_regs(struct hpsb_host *host, int nodeid, int destid,
/* these are not writable, only lockable */
return RCODE_TYPE_ERROR;
case CSR_BROADCAST_CHANNEL:
/* only the valid bit can be written */
host->csr.broadcast_channel = (host->csr.broadcast_channel & ~0x40000000)
| (be32_to_cpu(*data) & 0x40000000);
out;
/* address gap to end - fall through */
default:
return RCODE_ADDRESS_ERROR;
......@@ -373,6 +397,18 @@ static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
data = be32_to_cpu(data);
arg = be32_to_cpu(arg);
/* Is somebody releasing the broadcast_channel on us? */
if (csraddr == CSR_CHANNELS_AVAILABLE_HI && (data & 0x1)) {
/* Note: this may not be the right way to handle
* the problem, so we should look into the proper way
* eventually. */
HPSB_WARN("Node [" NODE_BUS_FMT "] wants to release "
"broadcast channel 31. Ignoring.",
NODE_BUS_ARGS(nodeid));
data &= ~0x1; /* keep broadcast channel allocated */
}
if (host->driver->hw_csr_reg) {
quadlet_t old;
......@@ -389,23 +425,84 @@ static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
switch (csraddr) {
case CSR_BUS_MANAGER_ID:
regptr = &host->csr.bus_manager_id;
*store = cpu_to_be32(*regptr);
if (*regptr == arg)
*regptr = data;
break;
case CSR_BANDWIDTH_AVAILABLE:
{
quadlet_t bandwidth;
quadlet_t old;
quadlet_t new;
regptr = &host->csr.bandwidth_available;
old = *regptr;
/* bandwidth available algorithm adapted from IEEE 1394a-2000 spec */
if (arg > 0x1fff) {
*store = cpu_to_be32(old); /* change nothing */
break;
}
data &= 0x1fff;
if (arg >= data) {
/* allocate bandwidth */
bandwidth = arg - data;
if (old >= bandwidth) {
new = old - bandwidth;
*store = cpu_to_be32(arg);
*regptr = new;
} else {
*store = cpu_to_be32(old);
}
} else {
/* deallocate bandwidth */
bandwidth = data - arg;
if (old + bandwidth < 0x2000) {
new = old + bandwidth;
*store = cpu_to_be32(arg);
*regptr = new;
} else {
*store = cpu_to_be32(old);
}
}
break;
}
case CSR_CHANNELS_AVAILABLE_HI:
{
/* Lock algorithm for CHANNELS_AVAILABLE as recommended by 1394a-2000 */
quadlet_t affected_channels = arg ^ data;
regptr = &host->csr.channels_available_hi;
if ((arg & affected_channels) == (*regptr & affected_channels)) {
*regptr ^= affected_channels;
*store = cpu_to_be32(arg);
} else {
*store = cpu_to_be32(*regptr);
}
break;
}
case CSR_CHANNELS_AVAILABLE_LO:
{
/* Lock algorithm for CHANNELS_AVAILABLE as recommended by 1394a-2000 */
quadlet_t affected_channels = arg ^ data;
regptr = &host->csr.channels_available_lo;
if ((arg & affected_channels) == (*regptr & affected_channels)) {
*regptr ^= affected_channels;
*store = cpu_to_be32(arg);
} else {
*store = cpu_to_be32(*regptr);
}
break;
}
}
*store = cpu_to_be32(*regptr);
if (*regptr == arg) *regptr = data;
spin_unlock_irqrestore(&host->csr.lock, flags);
return RCODE_COMPLETE;
......@@ -420,10 +517,7 @@ static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
case CSR_SPLIT_TIMEOUT_LO:
case CSR_CYCLE_TIME:
case CSR_BUS_TIME:
case CSR_BUS_MANAGER_ID:
case CSR_BANDWIDTH_AVAILABLE:
case CSR_CHANNELS_AVAILABLE_HI:
case CSR_CHANNELS_AVAILABLE_LO:
case CSR_BROADCAST_CHANNEL:
return RCODE_TYPE_ERROR;
case CSR_BUSY_TIMEOUT:
......@@ -433,6 +527,97 @@ static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
}
}
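/* Worked example of the bandwidth compare-swap above: BANDWIDTH_AVAILABLE
 * holds 4915 and a node that last read 4915 wants 100 allocation units.
 * It sends arg = 4915 and data = 4815, so bandwidth = arg - data = 100;
 * the register still holds at least 100 units, so it drops to 4815 and
 * *store returns arg, which the requester reads as success.  Releasing
 * those 100 units later sends arg = 4815, data = 4915 and takes the
 * deallocation branch, which only accepts the new total if it stays
 * below 0x2000. */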
static int lock64_regs(struct hpsb_host *host, int nodeid, octlet_t * store,
u64 addr, octlet_t data, octlet_t arg, int extcode, u16 fl)
{
int csraddr = addr - CSR_REGISTER_BASE;
unsigned long flags;
data = be64_to_cpu(data);
arg = be64_to_cpu(arg);
if (csraddr & 0x3)
return RCODE_TYPE_ERROR;
if (csraddr != CSR_CHANNELS_AVAILABLE
|| extcode != EXTCODE_COMPARE_SWAP)
goto unsupported_lock64req;
/* Is somebody releasing the broadcast_channel on us? */
if (csraddr == CSR_CHANNELS_AVAILABLE_HI && (data & 0x100000000ULL)) {
/* Note: this may not be the right way to handle
* the problem, so we should look into the proper way
* eventually. */
HPSB_WARN("Node [" NODE_BUS_FMT "] wants to release "
"broadcast channel 31. Ignoring.",
NODE_BUS_ARGS(nodeid));
data &= ~0x100000000ULL; /* keep broadcast channel allocated */
}
if (host->driver->hw_csr_reg) {
quadlet_t data_hi, data_lo;
quadlet_t arg_hi, arg_lo;
quadlet_t old_hi, old_lo;
data_hi = data >> 32;
data_lo = data & 0xFFFFFFFF;
arg_hi = arg >> 32;
arg_lo = arg & 0xFFFFFFFF;
old_hi = host->driver->hw_csr_reg(host, (csraddr - CSR_BUS_MANAGER_ID) >> 2,
data_hi, arg_hi);
old_lo = host->driver->hw_csr_reg(host, ((csraddr + 4) - CSR_BUS_MANAGER_ID) >> 2,
data_lo, arg_lo);
*store = cpu_to_be64(((octlet_t)old_hi << 32) | old_lo);
} else {
octlet_t old;
octlet_t affected_channels = arg ^ data;
spin_lock_irqsave(&host->csr.lock, flags);
old = ((octlet_t)host->csr.channels_available_hi << 32) | host->csr.channels_available_lo;
if ((arg & affected_channels) == (old & affected_channels)) {
host->csr.channels_available_hi ^= (affected_channels >> 32);
host->csr.channels_available_lo ^= (affected_channels & 0xffffffff);
*store = cpu_to_be64(arg);
} else {
*store = cpu_to_be64(old);
}
spin_unlock_irqrestore(&host->csr.lock, flags);
}
/* Is somebody erroneously releasing the broadcast_channel on us? */
if (host->csr.channels_available_hi & 0x1)
host->csr.channels_available_hi &= ~0x1;
return RCODE_COMPLETE;
unsupported_lock64req:
switch (csraddr) {
case CSR_STATE_CLEAR:
case CSR_STATE_SET:
case CSR_RESET_START:
case CSR_NODE_IDS:
case CSR_SPLIT_TIMEOUT_HI:
case CSR_SPLIT_TIMEOUT_LO:
case CSR_CYCLE_TIME:
case CSR_BUS_TIME:
case CSR_BUS_MANAGER_ID:
case CSR_BROADCAST_CHANNEL:
case CSR_BUSY_TIMEOUT:
case CSR_BANDWIDTH_AVAILABLE:
return RCODE_TYPE_ERROR;
default:
return RCODE_ADDRESS_ERROR;
}
}
static int write_fcp(struct hpsb_host *host, int nodeid, int dest,
quadlet_t *data, u64 addr, unsigned int length, u16 flags)
{
......@@ -474,6 +659,7 @@ static struct hpsb_address_ops reg_ops = {
.read = read_regs,
.write = write_regs,
.lock = lock_regs,
.lock64 = lock64_regs,
};
static struct hpsb_highlevel *hl;
......
......@@ -16,8 +16,10 @@
#define CSR_BUSY_TIMEOUT 0x210
#define CSR_BUS_MANAGER_ID 0x21c
#define CSR_BANDWIDTH_AVAILABLE 0x220
#define CSR_CHANNELS_AVAILABLE 0x224
#define CSR_CHANNELS_AVAILABLE_HI 0x224
#define CSR_CHANNELS_AVAILABLE_LO 0x228
#define CSR_BROADCAST_CHANNEL 0x234
#define CSR_CONFIG_ROM 0x400
#define CSR_CONFIG_ROM_END 0x800
#define CSR_FCP_COMMAND 0xB00
......@@ -40,6 +42,7 @@ struct csr_control {
quadlet_t bus_manager_id;
quadlet_t bandwidth_available;
quadlet_t channels_available_hi, channels_available_lo;
quadlet_t broadcast_channel;
quadlet_t *rom;
size_t rom_size;
......
/*
* DMA region bookkeeping routines
*
* Copyright (C) 2002 Maas Digital LLC
*
* This code is licensed under the GPL. See the file COPYING in the root
* directory of the kernel sources for details.
*/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "dma.h"
/* dma_prog_region */
void dma_prog_region_init(struct dma_prog_region *prog)
{
prog->kvirt = NULL;
prog->dev = NULL;
prog->n_pages = 0;
prog->bus_addr = 0;
}
int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev)
{
/* round up to page size */
if(n_bytes % PAGE_SIZE)
n_bytes += PAGE_SIZE - (n_bytes % PAGE_SIZE);
prog->n_pages = n_bytes / PAGE_SIZE;
prog->kvirt = pci_alloc_consistent(dev, prog->n_pages * PAGE_SIZE, &prog->bus_addr);
if(!prog->kvirt) {
printk(KERN_ERR "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
dma_prog_region_free(prog);
return -ENOMEM;
}
prog->dev = dev;
return 0;
}
void dma_prog_region_free(struct dma_prog_region *prog)
{
if(prog->kvirt) {
pci_free_consistent(prog->dev, prog->n_pages * PAGE_SIZE, prog->kvirt, prog->bus_addr);
}
prog->kvirt = NULL;
prog->dev = NULL;
prog->n_pages = 0;
prog->bus_addr = 0;
}
/* dma_region */
void dma_region_init(struct dma_region *dma)
{
dma->kvirt = NULL;
dma->dev = NULL;
dma->n_pages = 0;
dma->n_dma_pages = 0;
dma->sglist = NULL;
}
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction)
{
unsigned int i, n_pages;
/* round up to page size */
if(n_bytes % PAGE_SIZE)
n_bytes += PAGE_SIZE - (n_bytes % PAGE_SIZE);
n_pages = n_bytes / PAGE_SIZE;
dma->kvirt = vmalloc_32(n_pages * PAGE_SIZE);
if(!dma->kvirt) {
printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
goto err;
}
dma->n_pages = n_pages;
/* Clear the ram out, no junk to the user */
memset(dma->kvirt, 0, n_pages * PAGE_SIZE);
/* allocate scatter/gather list */
dma->sglist = kmalloc(dma->n_pages * sizeof(struct scatterlist), GFP_KERNEL);
if(!dma->sglist) {
printk(KERN_ERR "dma_region_alloc: kmalloc(sglist) failed\n");
goto err;
}
/* just to be safe - this will become unnecessary once sglist->address goes away */
memset(dma->sglist, 0, dma->n_pages * sizeof(struct scatterlist));
/* fill scatter/gather list with pages */
for(i = 0; i < dma->n_pages; i++) {
unsigned long va = (unsigned long) dma->kvirt + i * PAGE_SIZE;
dma->sglist[i].page = vmalloc_to_page((void *)va);
dma->sglist[i].length = PAGE_SIZE;
}
/* map sglist to the IOMMU */
dma->n_dma_pages = pci_map_sg(dev, &dma->sglist[0], dma->n_pages, direction);
if(dma->n_dma_pages == 0) {
printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
goto err;
}
dma->dev = dev;
dma->direction = direction;
return 0;
err:
dma_region_free(dma);
return -ENOMEM;
}
void dma_region_free(struct dma_region *dma)
{
if(dma->n_dma_pages) {
pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages, dma->direction);
dma->n_dma_pages = 0;
dma->dev = NULL;
}
if(dma->sglist) {
kfree(dma->sglist);
dma->sglist = NULL;
}
if(dma->kvirt) {
vfree(dma->kvirt);
dma->kvirt = NULL;
dma->n_pages = 0;
}
}
/* find the scatterlist index and remaining offset corresponding to a
given offset from the beginning of the buffer */
static inline int dma_region_find(struct dma_region *dma, unsigned long offset, unsigned long *rem)
{
int i;
unsigned long off = offset;
for(i = 0; i < dma->n_dma_pages; i++) {
if(off < sg_dma_len(&dma->sglist[i])) {
*rem = off;
return i;
}
off -= sg_dma_len(&dma->sglist[i]);
}
panic("dma_region_find: offset %lu beyond end of DMA mapping\n", offset);
}
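/* Worked example (assuming the IOMMU merged nothing, so each scatterlist
 * entry covers one 4096-byte page): an offset of 10000 bytes falls in
 * entry 2 with a remaining offset of 10000 - 2*4096 = 1808, so
 * dma_region_offset_to_bus() below returns sg_dma_address(&sglist[2]) + 1808. */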
dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset)
{
unsigned long rem;
struct scatterlist *sg = &dma->sglist[dma_region_find(dma, offset, &rem)];
return sg_dma_address(sg) + rem;
}
void dma_region_sync(struct dma_region *dma, unsigned long offset, unsigned long len)
{
int first, last;
unsigned long rem;
if(!len)
len = 1;
first = dma_region_find(dma, offset, &rem);
last = dma_region_find(dma, offset + len - 1, &rem);
pci_dma_sync_sg(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
}
/* nopage() handler for mmap access */
static struct page*
dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int write_access)
{
unsigned long offset;
unsigned long kernel_virt_addr;
struct page *ret = NOPAGE_SIGBUS;
struct dma_region *dma = (struct dma_region*) area->vm_private_data;
if(!dma->kvirt)
goto out;
if( (address < (unsigned long) area->vm_start) ||
(address > (unsigned long) area->vm_start + (PAGE_SIZE * dma->n_pages)) )
goto out;
offset = address - area->vm_start;
kernel_virt_addr = (unsigned long) dma->kvirt + offset;
ret = vmalloc_to_page((void*) kernel_virt_addr);
get_page(ret);
out:
return ret;
}
static struct vm_operations_struct dma_region_vm_ops = {
nopage: dma_region_pagefault,
};
int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
{
unsigned long size;
if(!dma->kvirt)
return -EINVAL;
/* must be page-aligned */
if(vma->vm_pgoff != 0)
return -EINVAL;
/* check the length */
size = vma->vm_end - vma->vm_start;
if(size > (PAGE_SIZE * dma->n_pages))
return -EINVAL;
vma->vm_ops = &dma_region_vm_ops;
vma->vm_private_data = dma;
vma->vm_file = file;
vma->vm_flags |= VM_RESERVED;
return 0;
}
/*
* DMA region bookkeeping routines
*
* Copyright (C) 2002 Maas Digital LLC
*
* This code is licensed under the GPL. See the file COPYING in the root
* directory of the kernel sources for details.
*/
#ifndef IEEE1394_DMA_H
#define IEEE1394_DMA_H
#include <linux/pci.h>
#include <asm/scatterlist.h>
/* struct dma_prog_region
a small, physically-contiguous DMA buffer with random-access,
synchronous usage characteristics
*/
struct dma_prog_region {
unsigned char *kvirt; /* kernel virtual address */
struct pci_dev *dev; /* PCI device */
unsigned int n_pages; /* # of kernel pages */
dma_addr_t bus_addr; /* base bus address */
};
/* clear out all fields but do not allocate any memory */
void dma_prog_region_init(struct dma_prog_region *prog);
int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev);
void dma_prog_region_free(struct dma_prog_region *prog);
static inline dma_addr_t dma_prog_region_offset_to_bus(struct dma_prog_region *prog, unsigned long offset)
{
return prog->bus_addr + offset;
}
/* struct dma_region
a large, non-physically-contiguous DMA buffer with streaming,
asynchronous usage characteristics
*/
struct dma_region {
unsigned char *kvirt; /* kernel virtual address */
struct pci_dev *dev; /* PCI device */
unsigned int n_pages; /* # of kernel pages */
unsigned int n_dma_pages; /* # of IOMMU pages */
struct scatterlist *sglist; /* IOMMU mapping */
int direction; /* PCI_DMA_TODEVICE, etc */
};
/* clear out all fields but do not allocate anything */
void dma_region_init(struct dma_region *dma);
/* allocate the buffer and map it to the IOMMU */
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction);
/* unmap and free the buffer */
void dma_region_free(struct dma_region *dma);
/* sync the IO bus' view of the buffer with the CPU's view */
void dma_region_sync(struct dma_region *dma, unsigned long offset, unsigned long len);
/* map the buffer into a user space process */
int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma);
/* macro to index into a DMA region (or dma_prog_region) */
#define dma_region_i(_dma, _type, _index) ( ((_type*) ((_dma)->kvirt)) + (_index) )
/* return the DMA bus address of the byte with the given offset
relative to the beginning of the dma_region */
dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset);
#endif /* IEEE1394_DMA_H */
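A minimal usage sketch of the dma_region interface declared above; the device, buffer size, and DMA direction are invented for illustration and nothing here is part of the patch:

#include <linux/pci.h>
#include "dma.h"

static struct dma_region recv_buf;

static int example_attach(struct pci_dev *pdev)
{
	int ret;

	dma_region_init(&recv_buf);
	ret = dma_region_alloc(&recv_buf, 64 * 1024, pdev, PCI_DMA_FROMDEVICE);
	if (ret)
		return ret;

	/* program the device with the bus address of the start of the buffer */
	/* dma_addr_t bus = dma_region_offset_to_bus(&recv_buf, 0); */
	return 0;
}

static void example_detach(void)
{
	/* make the device's writes visible to the CPU, then tear down */
	dma_region_sync(&recv_buf, 0, 64 * 1024);
	dma_region_free(&recv_buf);
}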
......@@ -28,8 +28,7 @@
#include "ieee1394.h"
#include "ohci1394.h"
#include <linux/pci.h>
#include <asm/scatterlist.h>
#include "dma.h"
/* data structures private to the dv1394 driver */
/* none of this is exposed to user-space */
......@@ -167,12 +166,14 @@ static inline void fill_input_more(struct input_more *im,
}
static inline void fill_input_last(struct input_last *il,
int want_interrupt,
unsigned int data_size,
unsigned long data_phys_addr)
{
u32 temp = 3 << 28; /* INPUT_LAST */
temp |= 8 << 24; /* s = 1, update xferStatus and resCount */
temp |= 3 << 20; /* enable interrupts */
if (want_interrupt)
temp |= 3 << 20; /* enable interrupts */
temp |= 0xC << 16; /* enable branch to address */
/* disable wait on sync field, not used in DV :-( */
temp |= data_size;
......@@ -301,8 +302,7 @@ struct frame {
unsigned long data;
/* Max # of packets per frame */
/* 320 is enough for NTSC, need to check what PAL is */
#define MAX_PACKETS 500
#define MAX_PACKETS 500
/* a PAGE_SIZE memory pool for allocating CIP headers
......@@ -383,35 +383,6 @@ static void frame_delete(struct frame *f);
/* reset f so that it can be used again */
static void frame_reset(struct frame *f);
/* structure for bookkeeping of a large non-physically-contiguous DMA buffer */
struct dma_region {
unsigned int n_pages;
unsigned int n_dma_pages;
struct scatterlist *sglist;
};
/* return the DMA bus address of the byte with the given offset
relative to the beginning of the dma_region */
static inline dma_addr_t dma_offset_to_bus(struct dma_region *dma, unsigned long offset)
{
int i;
struct scatterlist *sg;
for(i = 0, sg = &dma->sglist[0]; i < dma->n_dma_pages; i++, sg++) {
if(offset < sg_dma_len(sg)) {
return sg_dma_address(sg) + offset;
}
offset -= sg_dma_len(sg);
}
printk(KERN_ERR "dv1394: dma_offset_to_bus failed for offset %lu!\n", offset);
return 0;
}
/* struct video_card contains all data associated with one instance
of the dv1394 driver
*/
......@@ -508,9 +479,8 @@ struct video_card {
/* the large, non-contiguous (rvmalloc()) ringbuffer for DV
data, exposed to user-space via mmap() */
unsigned char *user_buf;
unsigned long user_buf_size;
struct dma_region user_dma;
unsigned long dv_buf_size;
struct dma_region dv_buf;
/* next byte in the ringbuffer that a write() call will fill */
size_t write_off;
......@@ -579,10 +549,8 @@ struct video_card {
/* physically contiguous packet ringbuffer for receive */
#define MAX_PACKET_BUFFER 30
struct packet *packet_buffer;
dma_addr_t packet_buffer_dma;
unsigned long packet_buffer_size;
struct dma_region packet_buf;
unsigned long packet_buf_size;
unsigned int current_packet;
int first_frame; /* received first start frame marker? */
......
......@@ -55,9 +55,9 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <asm/delay.h>
#include <asm/semaphore.h>
#include <asm/bitops.h>
#include <net/arp.h>
#include "ieee1394_types.h"
......@@ -77,7 +77,7 @@
printk(KERN_ERR fmt, ## args)
static char version[] __devinitdata =
"$Rev: 601 $ Ben Collins <bcollins@debian.org>";
"$Rev: 641 $ Ben Collins <bcollins@debian.org>";
/* Our ieee1394 highlevel driver */
#define ETHER1394_DRIVER_NAME "ether1394"
......@@ -360,7 +360,7 @@ static void ether1394_add_host (struct hpsb_host *host)
priv->host = host;
hi = (struct host_info *)kmalloc (sizeof (struct host_info),
GFP_KERNEL);
in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL);
if (hi == NULL)
goto out;
......@@ -682,6 +682,8 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
ptask->skb = skb;
ptask->addr = addr;
ptask->dest_node = dest_node;
/* TODO: When 2.4 is out of the way, give each of our ethernet
* dev's a workqueue to handle these. */
HPSB_INIT_WORK(&ptask->tq, hpsb_write_sched, ptask);
hpsb_schedule_work(&ptask->tq);
......
......@@ -28,7 +28,7 @@
LIST_HEAD(hl_drivers);
rwlock_t hl_drivers_lock = RW_LOCK_UNLOCKED;
static DECLARE_MUTEX(hl_drivers_lock);
LIST_HEAD(addr_space);
rwlock_t addr_space_lock = RW_LOCK_UNLOCKED;
......@@ -53,11 +53,11 @@ struct hpsb_highlevel *hpsb_register_highlevel(const char *name,
hl->name = name;
hl->op = ops;
write_lock_irq(&hl_drivers_lock);
down(&hl_drivers_lock);
list_add_tail(&hl->hl_list, &hl_drivers);
write_unlock_irq(&hl_drivers_lock);
up(&hl_drivers_lock);
hl_all_hosts(hl->op->add_host);
hl_all_hosts(hl->op->add_host);
return hl;
}
......@@ -82,9 +82,9 @@ void hpsb_unregister_highlevel(struct hpsb_highlevel *hl)
}
write_unlock_irq(&addr_space_lock);
write_lock_irq(&hl_drivers_lock);
down(&hl_drivers_lock);
list_del(&hl->hl_list);
write_unlock_irq(&hl_drivers_lock);
up(&hl_drivers_lock);
if (hl->op->remove_host)
hl_all_hosts(hl->op->remove_host);
......@@ -119,10 +119,8 @@ int hpsb_register_addrspace(struct hpsb_highlevel *hl,
write_lock_irq(&addr_space_lock);
entry = addr_space.next;
while (list_entry(entry, struct hpsb_address_serve, as_list)->end
<= start) {
if (list_entry(entry->next, struct hpsb_address_serve, as_list)
->start >= end) {
while (list_entry(entry, struct hpsb_address_serve, as_list)->end <= start) {
if (list_entry(entry->next, struct hpsb_address_serve, as_list)->start >= end) {
list_add(&as->as_list, entry);
list_add_tail(&as->addr_list, &hl->addr_list);
retval = 1;
......@@ -198,13 +196,13 @@ void highlevel_add_host(struct hpsb_host *host)
struct list_head *entry;
struct hpsb_highlevel *hl;
read_lock(&hl_drivers_lock);
down(&hl_drivers_lock);
list_for_each(entry, &hl_drivers) {
hl = list_entry(entry, struct hpsb_highlevel, hl_list);
hl->op->add_host(host);
}
read_unlock(&hl_drivers_lock);
up(&hl_drivers_lock);
}
void highlevel_remove_host(struct hpsb_host *host)
......@@ -212,14 +210,14 @@ void highlevel_remove_host(struct hpsb_host *host)
struct list_head *entry;
struct hpsb_highlevel *hl;
write_lock_irq(&hl_drivers_lock);
down(&hl_drivers_lock);
list_for_each(entry, &hl_drivers) {
hl = list_entry(entry, struct hpsb_highlevel, hl_list);
if (hl->op->remove_host)
hl->op->remove_host(host);
}
write_unlock_irq(&hl_drivers_lock);
up(&hl_drivers_lock);
}
void highlevel_host_reset(struct hpsb_host *host)
......@@ -227,14 +225,14 @@ void highlevel_host_reset(struct hpsb_host *host)
struct list_head *entry;
struct hpsb_highlevel *hl;
read_lock(&hl_drivers_lock);
down(&hl_drivers_lock);
list_for_each(entry, &hl_drivers) {
hl = list_entry(entry, struct hpsb_highlevel, hl_list);
if (hl->op->host_reset)
hl->op->host_reset(host);
}
read_unlock(&hl_drivers_lock);
up(&hl_drivers_lock);
}
void highlevel_iso_receive(struct hpsb_host *host, quadlet_t *data,
......@@ -244,7 +242,7 @@ void highlevel_iso_receive(struct hpsb_host *host, quadlet_t *data,
struct hpsb_highlevel *hl;
int channel = (data[0] >> 8) & 0x3f;
read_lock(&hl_drivers_lock);
down(&hl_drivers_lock);
entry = hl_drivers.next;
while (entry != &hl_drivers) {
......@@ -254,7 +252,7 @@ void highlevel_iso_receive(struct hpsb_host *host, quadlet_t *data,
}
entry = entry->next;
}
read_unlock(&hl_drivers_lock);
up(&hl_drivers_lock);
}
void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
......@@ -264,7 +262,7 @@ void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
struct hpsb_highlevel *hl;
int cts = data[0] >> 4;
read_lock(&hl_drivers_lock);
down(&hl_drivers_lock);
entry = hl_drivers.next;
while (entry != &hl_drivers) {
......@@ -275,7 +273,7 @@ void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
}
entry = entry->next;
}
read_unlock(&hl_drivers_lock);
up(&hl_drivers_lock);
}
int highlevel_read(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
......
......@@ -121,6 +121,7 @@ void hpsb_unref_host(struct hpsb_host *host)
struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra)
{
struct hpsb_host *h;
int i;
h = kmalloc(sizeof(struct hpsb_host) + extra, SLAB_KERNEL);
if (!h) return NULL;
......@@ -133,8 +134,8 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra)
INIT_LIST_HEAD(&h->pending_packets);
spin_lock_init(&h->pending_pkt_lock);
sema_init(&h->tlabel_count, 64);
spin_lock_init(&h->tlabel_lock);
for (i = 0; i < ARRAY_SIZE(h->tpool); i++)
HPSB_TPOOL_INIT(&h->tpool[i]);
atomic_set(&h->generation, 0);
......
......@@ -18,6 +18,7 @@
#define CSR_CONFIG_ROM_SIZE 0x100
struct hpsb_packet;
struct hpsb_iso;
struct hpsb_host {
struct list_head host_list;
......@@ -32,13 +33,6 @@ struct hpsb_host {
spinlock_t pending_pkt_lock;
struct hpsb_queue_struct timeout_tq;
/* A bitmask where a set bit means that this tlabel is in use.
* FIXME - should be handled per node instead of per bus. */
u32 tlabel_pool[2];
struct semaphore tlabel_count;
spinlock_t tlabel_lock;
u32 tlabel_current;
unsigned char iso_listen_count[64];
int node_count; /* number of identified nodes on this bus */
......@@ -64,6 +58,9 @@ struct hpsb_host {
u8 *speed_map;
struct csr_control csr;
/* Per node tlabel pool allocation */
struct hpsb_tlabel_pool tpool[64];
struct hpsb_host_driver *driver;
struct pci_dev *pdev;
......@@ -108,6 +105,28 @@ enum devctl_cmd {
ISO_UNLISTEN_CHANNEL
};
enum isoctl_cmd {
/* rawiso API - see iso.h for the meanings of these commands
* INIT = allocate resources
* START = begin transmission/reception (arg: cycle to start on)
* STOP = halt transmission/reception
* QUEUE/RELEASE = produce/consume packets (arg: # of packets)
* SHUTDOWN = deallocate resources
*/
XMIT_INIT,
XMIT_START,
XMIT_STOP,
XMIT_QUEUE,
XMIT_SHUTDOWN,
RECV_INIT,
RECV_START,
RECV_STOP,
RECV_RELEASE,
RECV_SHUTDOWN,
};
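For illustration only (example_isoctl and its body are assumptions, not code from this commit): a low-level driver that supports the rawiso API points its isoctl hook at a dispatcher over these commands, returning 0 on success and -1 on failure as documented for hpsb_host_driver below.
	static int example_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, int arg)
	{
		switch (cmd) {
		case XMIT_INIT:
		case RECV_INIT:
			return 0;	/* allocate DMA contexts and buffers here */
		case XMIT_START:
		case RECV_START:
			return 0;	/* arg = cycle to start on */
		case XMIT_QUEUE:
		case RECV_RELEASE:
			return 0;	/* arg = number of packets produced/consumed */
		case XMIT_STOP:
		case RECV_STOP:
		case XMIT_SHUTDOWN:
		case RECV_SHUTDOWN:
			return 0;	/* halt and free resources */
		default:
			return -1;	/* unsupported command */
		}
	}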
enum reset_types {
/* 166 microsecond reset -- only type of reset available on
non-1394a capable IEEE 1394 controllers */
......@@ -115,7 +134,13 @@ enum reset_types {
/* Short (arbitrated) reset -- only available on 1394a capable
IEEE 1394 capable controllers */
SHORT_RESET
SHORT_RESET,
/* Variants that set force_root before issuing the bus reset */
LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT,
/* Variants that clear force_root before issuing the bus reset */
LONG_RESET_NO_FORCE_ROOT, SHORT_RESET_NO_FORCE_ROOT
};
struct hpsb_host_driver {
......@@ -145,6 +170,11 @@ struct hpsb_host_driver {
*/
int (*devctl) (struct hpsb_host *host, enum devctl_cmd command, int arg);
/* ISO transmission/reception functions. Return 0 on success, -1 on failure.
* If the low-level driver does not support the new ISO API, set isoctl to NULL.
*/
int (*isoctl) (struct hpsb_iso *iso, enum isoctl_cmd command, int arg);
/* This function is mainly to redirect local CSR reads/locks to the iso
* management registers (bus manager id, bandwidth available, channels
* available) to the hardware registers in OHCI. reg is 0,1,2,3 for bus
......@@ -156,9 +186,6 @@ struct hpsb_host_driver {
quadlet_t data, quadlet_t compare);
};
/* core internal use */
void register_builtin_lowlevels(void);
/* high level internal use */
struct hpsb_highlevel;
void hl_all_hosts(void (*function)(struct hpsb_host*));
......
......@@ -15,6 +15,7 @@
#define TCODE_CYCLE_START 0x8
#define TCODE_LOCK_REQUEST 0x9
#define TCODE_ISO_DATA 0xa
#define TCODE_STREAM_DATA 0xa
#define TCODE_LOCK_RESPONSE 0xb
#define RCODE_COMPLETE 0x0
......
......@@ -68,7 +68,10 @@ struct hpsb_packet {
/* Very core internal, don't care. */
struct semaphore state_change;
struct list_head complete_tq;
/* Function (and possible data to pass to it) to call when this
* packet is completed. */
void (*complete_routine)(void *);
void *complete_data;
/* Store jiffies for implementing bus timeouts. */
unsigned long sendtime;
......@@ -76,8 +79,9 @@ struct hpsb_packet {
quadlet_t embedded_header[5];
};
/* add a new task for when a packet completes */
void hpsb_add_packet_complete_task(struct hpsb_packet *packet, struct hpsb_queue_struct *tq);
/* Set a task for when a packet completes */
void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
void (*routine)(void *), void *data);
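A hypothetical usage sketch (my_complete() and the call site are illustrative assumptions, not code from this commit): a protocol driver now registers a single completion routine instead of queueing a task on the removed complete_tq list.
	static void my_complete(void *data)
	{
		struct hpsb_packet *packet = data;

		hpsb_free_tlabel(packet);	/* prototype appears in ieee1394_transactions.h below */
		/* ... free or recycle the packet here ... */
	}

	/* in the sending path: */
	hpsb_set_packet_complete_task(packet, my_complete, packet);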
static inline struct hpsb_packet *driver_packet(struct list_head *l)
{
......@@ -136,6 +140,12 @@ void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid);
*/
void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot);
/*
* Check bus reset results to find cycle master
*/
void hpsb_check_cycle_master(struct hpsb_host *host);
/*
* Notify core of sending a packet. Ackcode is the ack code returned for async
* transmits or ACKX_SEND_ERROR if the transmission failed completely; ACKX_NONE
......
......@@ -4,49 +4,27 @@
#include "ieee1394_core.h"
/*
* Utility functions to fill out packet headers.
*/
void fill_async_readquad(struct hpsb_packet *packet, u64 addr);
void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
quadlet_t data);
void fill_async_readblock(struct hpsb_packet *packet, u64 addr, int length);
void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
int length);
void fill_async_writequad(struct hpsb_packet *packet, u64 addr, quadlet_t data);
void fill_async_writeblock(struct hpsb_packet *packet, u64 addr, int length);
void fill_async_write_resp(struct hpsb_packet *packet, int rcode);
void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode,
int length);
void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode,
int length);
void fill_iso_packet(struct hpsb_packet *packet, int length, int channel,
int tag, int sync);
void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data);
/*
* Get and free transaction labels.
*/
int get_tlabel(struct hpsb_host *host, nodeid_t nodeid, int wait);
void free_tlabel(struct hpsb_host *host, nodeid_t nodeid, int tlabel);
int hpsb_get_tlabel(struct hpsb_packet *packet, int wait);
void hpsb_free_tlabel(struct hpsb_packet *packet);
struct hpsb_packet *hpsb_make_readqpacket(struct hpsb_host *host, nodeid_t node,
u64 addr);
struct hpsb_packet *hpsb_make_readbpacket(struct hpsb_host *host, nodeid_t node,
u64 addr, size_t length);
struct hpsb_packet *hpsb_make_writeqpacket(struct hpsb_host *host,
nodeid_t node, u64 addr,
quadlet_t data);
struct hpsb_packet *hpsb_make_writebpacket(struct hpsb_host *host,
nodeid_t node, u64 addr,
size_t length);
struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
u64 addr, size_t length);
struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
u64 addr, int extcode);
u64 addr, int extcode, quadlet_t *data,
quadlet_t arg);
struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node,
u64 addr, int extcode);
u64 addr, int extcode, octlet_t *data,
octlet_t arg);
struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host,
quadlet_t data) ;
struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
int length, int channel,
int tag, int sync);
struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node,
u64 addr, quadlet_t *buffer, size_t length);
/*
* hpsb_packet_success - Make sense of the ack and reply codes and
......@@ -75,10 +53,7 @@ int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, quadlet_t *buffer, size_t length);
int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, int extcode, quadlet_t *data, quadlet_t arg);
/* Generic packet creation. Used by hpsb_write. Also useful for protocol
* drivers that want to implement their own hpsb_write replacement. */
struct hpsb_packet *hpsb_make_packet (struct hpsb_host *host, nodeid_t node,
u64 addr, quadlet_t *buffer, size_t length);
int hpsb_lock64(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, int extcode, octlet_t *data, octlet_t arg);
#endif /* _IEEE1394_TRANSACTIONS_H */
......@@ -8,6 +8,7 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/string.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>
......@@ -62,6 +63,30 @@
#define HPSB_PREPARE_WORK(x,y,z) PREPARE_WORK(x,y,z)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,44)
/* pci_pool_create changed. does not take the flags arg any longer */
#define hpsb_pci_pool_create(a,b,c,d,e,f) pci_pool_create(a,b,c,d,e,f)
#else
#define hpsb_pci_pool_create(a,b,c,d,e,f) pci_pool_create(a,b,c,d,e)
#endif
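A hypothetical sketch (pool name, pdev, sizes and flags are assumptions): the wrapper lets callers keep one six-argument call on either side of the 2.5.44 API change; on newer kernels it simply drops the trailing flags argument.
	struct pci_pool *pool;

	pool = hpsb_pci_pool_create("ieee1394 prg", pdev,
				    64 /* size */, 4 /* align */,
				    0 /* allocation */, SLAB_KERNEL);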
/* Transaction Label handling */
struct hpsb_tlabel_pool {
u64 pool;
spinlock_t lock;
u8 next;
u32 allocations;
struct semaphore count;
};
#define HPSB_TPOOL_INIT(_tp) \
do { \
sema_init(&(_tp)->count, 63); \
spin_lock_init(&(_tp)->lock); \
(_tp)->next = 0; \
(_tp)->pool = 0; \
} while(0)
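A simplified sketch of how a transaction label could be allocated from this per-node pool; the real hpsb_get_tlabel() lives in ieee1394_transactions.c and is not reproduced here, so the field usage and the casts below are assumptions.
	static int example_get_tlabel(struct hpsb_tlabel_pool *tp)
	{
		unsigned long flags;
		int tlabel;

		down(&tp->count);	/* at most 63 labels outstanding per node */
		spin_lock_irqsave(&tp->lock, flags);
		tlabel = find_next_zero_bit((unsigned long *)&tp->pool, 64, tp->next);
		if (tlabel > 63)
			tlabel = find_first_zero_bit((unsigned long *)&tp->pool, 64);
		set_bit(tlabel, (unsigned long *)&tp->pool);
		tp->next = (tlabel + 1) % 64;
		tp->allocations++;
		spin_unlock_irqrestore(&tp->lock, flags);

		return tlabel;
	}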
typedef u32 quadlet_t;
typedef u64 octlet_t;
......
......@@ -132,7 +132,11 @@ struct node_entry {
u32 capabilities;
struct list_head unit_directories;
struct hpsb_tlabel_pool *tpool;
const char *vendor_name;
char *oui_name;
quadlet_t quadlets[0];
};
......@@ -152,6 +156,10 @@ struct node_entry *hpsb_guid_get_entry(u64 guid);
* fool-proof by itself, since the nodeid can change. */
struct node_entry *hpsb_nodeid_get_entry(nodeid_t nodeid);
/* Same as above except that it will not block waiting for the nodemgr
* serialize semaphore. */
struct node_entry *hpsb_check_nodeid(nodeid_t nodeid);
/*
* If the entry refers to a local host, this function will return the pointer
* to the hpsb_host structure. It will return NULL otherwise. Once you have
......
......@@ -95,6 +95,7 @@ struct dma_rcv_ctx {
/* dma block descriptors */
struct dma_cmd **prg_cpu;
dma_addr_t *prg_bus;
struct pci_pool *prg_pool;
/* dma buffers */
quadlet_t **buf_cpu;
......@@ -120,6 +121,7 @@ struct dma_trm_ctx {
/* dma block descriptors */
struct at_dma_prg **prg_cpu;
dma_addr_t *prg_bus;
struct pci_pool *prg_pool;
unsigned int prg_ind;
unsigned int sent_ind;
......@@ -292,6 +294,9 @@ static inline u32 reg_read(const struct ti_ohci *ohci, int offset)
#define OHCI1394_IsoRecvIntEventClear 0x0A4
#define OHCI1394_IsoRecvIntMaskSet 0x0A8
#define OHCI1394_IsoRecvIntMaskClear 0x0AC
#define OHCI1394_InitialBandwidthAvailable 0x0B0
#define OHCI1394_InitialChannelsAvailableHi 0x0B4
#define OHCI1394_InitialChannelsAvailableLo 0x0B8
#define OHCI1394_FairnessControl 0x0DC
#define OHCI1394_LinkControlSet 0x0E0
#define OHCI1394_LinkControlClear 0x0E4
......
#!/bin/sh
cat <<EOF
/* Generated file for OUI database */
#include <linux/config.h>
#ifdef CONFIG_IEEE1394_OUI_DB
struct oui_list_struct {
int oui;
char *name;
} oui_list[] = {
EOF
while read oui name; do
echo " { 0x$oui, \"$name\" },"
done
cat <<EOF
};
#endif /* CONFIG_IEEE1394_OUI_DB */
EOF
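For illustration, feeding the script a database line such as "00A0B8 Example Corp." (a hypothetical entry) on stdin produces a C file of this shape:
	/* Generated file for OUI database */
	#include <linux/config.h>

	#ifdef CONFIG_IEEE1394_OUI_DB
	struct oui_list_struct {
		int oui;
		char *name;
	} oui_list[] = {
		{ 0x00A0B8, "Example Corp." },
	};
	#endif /* CONFIG_IEEE1394_OUI_DB */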
......@@ -37,8 +37,7 @@
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/devfs_fs_kernel.h>
#include <asm/bitops.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/wrapper.h>
#include <linux/vmalloc.h>
......@@ -1249,18 +1248,15 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
int video1394_mmap(struct file *file, struct vm_area_struct *vma)
{
struct file_ctx *ctx = (struct file_ctx *)file->private_data;
struct video_card *video = ctx->video;
struct ti_ohci *ohci = video->ohci;
int res = -EINVAL;
lock_kernel();
ohci = video->ohci;
if (ctx->current_ctx == NULL) {
PRINT(KERN_ERR, ohci->id, "Current iso context not set");
PRINT(KERN_ERR, ctx->video->ohci->id, "Current iso context not set");
} else
res = do_iso_mmap(ohci, ctx->current_ctx, vma);
res = do_iso_mmap(ctx->video->ohci, ctx->current_ctx, vma);
unlock_kernel();
return res;
}
......
......@@ -986,6 +986,10 @@ static int copy_params(struct dm_ioctl *user, struct dm_ioctl **param)
static int validate_params(uint cmd, struct dm_ioctl *param)
{
/* Ignores parameters */
if (cmd == DM_REMOVE_ALL_CMD)
return 0;
/* Unless creating, either name or uuid but not both */
if (cmd != DM_DEV_CREATE_CMD) {
if ((!*param->uuid && !*param->name) ||
......
......@@ -43,7 +43,7 @@ static int linear_ctr(struct dm_target *ti, int argc, char **argv)
goto bad;
}
if (dm_get_device(ti, argv[0], ti->begin, ti->len,
if (dm_get_device(ti, argv[0], lc->start, ti->len,
dm_table_get_mode(ti->table), &lc->dev)) {
ti->error = "dm-linear: Device lookup failed";
goto bad;
......
......@@ -29,6 +29,8 @@
#define SECTOR_FORMAT "%lu"
#endif
#define SECTOR_SHIFT 9
extern struct block_device_operations dm_blk_dops;
/*
......
......@@ -1525,7 +1525,7 @@ static void set_multicast_list(struct net_device *dev)
}
#ifdef MODULE
static struct net_device dev_82596 = { init: i82596_probe };
static struct net_device dev_82596 = { .init = i82596_probe };
#ifdef ENABLE_APRICOT
static int io = 0x300;
......