Commit 0700dca1 authored by Linus Torvalds's avatar Linus Torvalds

Import 2.3.27pre2

parent ce84b36d
......@@ -7919,7 +7919,7 @@ CONFIG_USB_PROC
Note that you must say Y to "/proc filesystem support" below for
this to work.
Generic ACPI support
ACPI support
CONFIG_ACPI
Advanced Configuration and Power Interface (ACPI) is an interface
specification to support power management of peripherals. If your
......@@ -7930,13 +7930,6 @@ CONFIG_ACPI
The module will be called acpi.o. If you want to compile it as a
module, say M here and read Documentation/modules.txt.
PIIX4 ACPI support
CONFIG_PIIX4_ACPI
If you have a PIIX4 based motherboard (PCI ISA IDE Xcelerator
(PIIX4) is a multi-function PCI device) and you want support for
Advanced Configuration and Power Interface (ACPI) to support power
management of peripherals, say Y here.
Minix fs support
CONFIG_MINIX_FS
Minix is a simple operating system used in many classes about OS's.
......
ACPI Driver Interface
---------------------
Overview:
1) Register each instance of a device with "acpi_register"
2) Call "acpi_access" before accessing the hardware.
(this will ensure that the hardware is awake and ready)
3) "acpi_transition" callback is called before entering D1-D3
or after entering D0
4) Call "acpi_dev_idle" when the device is not being used
(not required by will improve idle detection)
5) When unloaded, unregister the device with "acpi_unregister"
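A minimal sketch (not part of this patch) of steps 1, 3 and 5 for a made-up
driver "mydev"; the save/restore helpers and the choice of ACPI_SYS_DEV with
ACPI_UNKNOWN_HID are illustrative assumptions. Steps 2 and 4 (acpi_access
and acpi_dev_idle) are sketched further below.
#include <linux/init.h>
#include <linux/acpi.h>
static struct acpi_dev *mydev_acpi;
static void mydev_save_context(void)    { /* save chip registers */ }
static void mydev_restore_context(void) { /* restore chip registers */ }
/* Step 3: called before entering D1-D3, or after entering D0 */
static int mydev_trans(struct acpi_dev *dev, acpi_dstate_t state)
{
	if (state == ACPI_D0)
		mydev_restore_context();
	else
		mydev_save_context();
	return 0;
}
static int __init mydev_init(void)
{
	/* Step 1: register; a NULL callback is also allowed (keyboard.c does this) */
	mydev_acpi = acpi_register(ACPI_SYS_DEV, 0, ACPI_UNKNOWN_HID, mydev_trans);
	return 0;
}
static void __exit mydev_exit(void)
{
	/* Step 5: unregister on unload (acpi_unregister accepts a NULL dev) */
	acpi_unregister(mydev_acpi);
}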
/*
* Description: Register a device with the ACPI subsystem
*
* Parameters:
* type - device type
* adr - bus number and address or unique id
* hid - PnP identifier (or 0 if unknown)
* trans - device state transition callback
*
* Returns: Registered ACPI device or NULL on error
*
* Details: The device type, bus number, and bus address should be
* enough information to reconstruct the device tree and
* identify device dependencies
*
* Examples:
* dev = acpi_register(ACPI_SYS_DEV, 0, ACPI_VGA_HID, vga_trans);
*
* struct pci_dev *pci_dev = pci_find_dev(...);
* dev = acpi_register(ACPI_PCI_DEV, ACPI_PCI_ADR(pci_dev), 0, trans);
*/
struct acpi_dev *acpi_register(acpi_dev_t type,
unsigned long adr,
acpi_hid_t hid,
acpi_transition trans);
/*
* Description: Unregister a device with ACPI
*
* Parameters:
* dev - ACPI device previously returned from acpi_register
*/
void acpi_unregister(struct acpi_dev *dev);
/*
* Device idle/use detection
*
* In general, drivers for all devices should call "acpi_access"
* before accessing the hardware (ie. before reading or modifying
* a hardware register). Request or packet-driven drivers should
* additionally call "acpi_dev_idle" when a device is not being used.
*
* Examples:
* 1) A keyboard driver would call acpi_access whenever a key is pressed
* 2) A network driver would call acpi_access before submitting
* a packet for transmit or receive, and acpi_dev_idle when its
* transmit and receive queues are empty (sketched below).
* 3) A VGA driver would call acpi_access before it accesses any
* of the video controller registers
*
* Ultimately, the ACPI policy manager uses the access and idle
* information to decide when to transition devices between
* device states.
*/
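For instance, example 2 above (a packet-driven network driver) might look
roughly like the following sketch; the "mydev" names and the
mydev_queues_empty() helper are assumptions for illustration, not part of
this interface.
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/acpi.h>
static struct acpi_dev *mydev_acpi;		/* obtained from acpi_register() */
static int mydev_queues_empty(struct net_device *dev);	/* hypothetical helper */
static int mydev_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (mydev_acpi)
		acpi_access(mydev_acpi);	/* wake the chip before queueing */
	/* ... hand the skb to the hardware ... */
	return 0;
}
/* called from the driver's interrupt handler after reclaiming descriptors */
static void mydev_tx_done(struct net_device *dev)
{
	if (mydev_acpi && mydev_queues_empty(dev))
		acpi_dev_idle(mydev_acpi);	/* both queues drained */
}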
/*
* Description: Update device access time and wake up device, if necessary
*
* Parameters:
* dev - ACPI device previously returned from acpi_register
*
* Details: If called from an interrupt handler, acpi_access updates the
* access time but should never need to wake up the device
* (if the device is generating interrupts, it should be awake
* already). This is important because we cannot wake up
* devices (run AML, etc.) from an interrupt handler.
*/
void acpi_access(struct acpi_dev *dev);
/*
* Description: Identify device as currently being idle
*
* Parameters:
* dev - ACPI device previously returned from acpi_register
*
* Details: A call to acpi_dev_idle might signal to the policy manager
* to put a device to sleep. If a new device request arrives
* between the call to acpi_dev_idle and the acpi_transition
* callback, the driver should fail the acpi_transition request.
*/
void acpi_dev_idle(struct acpi_dev *dev);
/*
* Transition function
*
* Parameters:
* dev - ACPI device previously returned from acpi_register
* state - the device state being entered
*
* Returns: 0 if the state transition is possible and context saved
* EINVAL if the requested device state is not supported
* EBUSY if the device is now busy and cannot transition
* ENOMEM if the device was unable to save context (out of memory)
*
* Details: The device state transition function will be called
* before the device is transitioned into the D1-D3 states
* or after the device is transitioned into the D0 state.
* The device driver should save (D1-D3) or restore (D0)
* device context when the transition function is called.
*
* For system devices, the ACPI subsystem will perform
* the actual hardware state transition itself. For bus
* devices, after the driver's acpi_transition function
* is called, the bus driver's acpi_transition function
* is called to perform the actual hardware state transition.
*
* Once a driver returns 0 (success) from a D1-D3
* transition request, it should not process any further
* requests or access the device hardware until a
* call to "acpi_access" is made (see the example below).
*/
typedef int (*acpi_transition)(struct acpi_dev *dev, acpi_dstate_t state);
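Putting the rules above together, a fuller version of the mydev_trans
callback from the first sketch might look like this (sketch only:
mydev_queue_busy() and the save/restore helpers are assumed names, and the
negative-errno return convention is an assumption -- the text above only
names EINVAL, EBUSY and ENOMEM).
#include <linux/errno.h>
#include <linux/acpi.h>
static void mydev_save_context(void);		/* from the first sketch */
static void mydev_restore_context(void);
static int mydev_queue_busy(void);		/* hypothetical: new work pending? */
static int mydev_trans(struct acpi_dev *dev, acpi_dstate_t state)
{
	switch (state) {
	case ACPI_D0:
		mydev_restore_context();	/* restore after wake-up */
		return 0;
	case ACPI_D1:
	case ACPI_D2:
		return -EINVAL;			/* intermediate states not supported */
	case ACPI_D3:
		if (mydev_queue_busy())		/* work arrived since acpi_dev_idle */
			return -EBUSY;
		mydev_save_context();		/* save before power is removed */
		return 0;
	}
	return -EINVAL;
}
After returning 0 for a D1-D3 request the driver must not touch the
hardware again until it has called "acpi_access".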
VERSION = 2
PATCHLEVEL = 3
SUBLEVEL = 26
SUBLEVEL = 27
EXTRAVERSION =
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
......
......@@ -206,3 +206,5 @@ EXPORT_SYMBOL_NOVERS(__remq);
EXPORT_SYMBOL_NOVERS(__remqu);
EXPORT_SYMBOL_NOVERS(memcpy);
EXPORT_SYMBOL_NOVERS(memset);
EXPORT_SYMBOL(get_wchan);
......@@ -406,3 +406,35 @@ asmlinkage int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
unlock_kernel();
return error;
}
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
unsigned long get_wchan(struct task_struct *p)
{
unsigned long schedule_frame;
unsigned long pc;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
/*
* This one depends on the frame size of schedule(). Do a
* "disass schedule" in gdb to find the frame size. Also, the
* code assumes that sleep_on() follows immediately after
* interruptible_sleep_on() and that add_timer() follows
* immediately after interruptible_sleep(). Ugly, isn't it?
* Maybe adding a wchan field to task_struct would be better,
* after all...
*/
pc = thread_saved_pc(&p->thread);
if (pc >= first_sched && pc < last_sched) {
schedule_frame = ((unsigned long *)p->thread.ksp)[6];
return ((unsigned long *)schedule_frame)[12];
}
return pc;
}
......@@ -237,3 +237,5 @@ EXPORT_SYMBOL_NOVERS(__down_failed);
EXPORT_SYMBOL_NOVERS(__down_interruptible_failed);
EXPORT_SYMBOL_NOVERS(__down_trylock_failed);
EXPORT_SYMBOL_NOVERS(__up_wakeup);
EXPORT_SYMBOL(get_wchan);
......@@ -337,3 +337,31 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
return __ret;
}
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
unsigned long get_wchan(struct task_struct *p)
{
unsigned long fp, lr;
unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = 4096 + (unsigned long)p;
fp = get_css_fp(&p->thread);
do {
if (fp < stack_page || fp > 4092+stack_page)
return 0;
lr = pc_pointer (((unsigned long *)fp)[-1]);
if (lr < first_sched || lr > last_sched)
return lr;
fp = *(unsigned long *) (fp - 12);
} while (count ++ < 16);
return 0;
}
......@@ -75,3 +75,5 @@ EXPORT_SYMBOL_NOVERS(__down_failed);
EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
EXPORT_SYMBOL_NOVERS(__down_failed_trylock);
EXPORT_SYMBOL_NOVERS(__up_wakeup);
EXPORT_SYMBOL(get_wchan);
......@@ -346,3 +346,34 @@ asmlinkage int sys_execve(char *name, char **argv, char **envp)
unlock_kernel();
return error;
}
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
unsigned long get_wchan(struct task_struct *p)
{
unsigned long fp, pc;
unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = (unsigned long)p;
fp = ((struct switch_stack *)p->thread.ksp)->a6;
do {
if (fp < stack_page+sizeof(struct task_struct) ||
fp >= 8184+stack_page)
return 0;
pc = ((unsigned long *)fp)[1];
/* FIXME: This depends on the order of these functions. */
if (pc < first_sched || pc >= last_sched)
return pc;
fp = *(unsigned long *) fp;
} while (count++ < 16);
return 0;
}
......@@ -122,3 +122,4 @@ EXPORT_SYMBOL(unregister_fpe);
EXPORT_SYMBOL(screen_info);
#endif
EXPORT_SYMBOL(get_wchan);
......@@ -182,3 +182,28 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
return retval;
}
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
unsigned long get_wchan(struct task_struct *p)
{
unsigned long schedule_frame;
unsigned long pc;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
/*
* The same comment as on the Alpha applies here, too ...
*/
pc = thread_saved_pc(&p->tss);
if (pc >= (unsigned long) interruptible_sleep_on && pc < (unsigned long) add_timer) {
schedule_frame = ((unsigned long *)(long)p->tss.reg30)[16];
return (unsigned long)((unsigned long *)schedule_frame)[11];
}
return pc;
}
......@@ -44,7 +44,7 @@ else
fi
bool 'Symmetric multi-processing support' CONFIG_SMP
if [ "$CONFIG_6xx" != "y" ];then
if [ "$CONFIG_6xx" = "y" ];then
bool 'AltiVec Support' CONFIG_ALTIVEC
fi
......@@ -90,7 +90,7 @@ bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
# only elf supported, a.out is not -- Cort
if [ "$CONFIG_PROC_FS" = "y" ]; then
define_bool CONFIG_KCORE_ELF y
define_bool CONFIG_KCORE_ELF
fi
define_bool CONFIG_BINFMT_ELF y
define_bool CONFIG_KERNEL_ELF y
......
......@@ -818,6 +818,15 @@ giveup_altivec:
cmpi 0,r4,12
bnelr
/* enable altivec so we can save */
mfmsr r4
oris r4,r4,MSR_VEC@h
mtmsr r4
/* make sure our tsk pointer is valid */
cmpi 0,r3,0
beqlr
/* save altivec regs */
addi r4,r3,THREAD+THREAD_VRSAVE
mfspr r5,256 /* vrsave */
......@@ -830,6 +839,11 @@ giveup_altivec:
lis r6,MSR_VEC@h
andi. r5,r5,r6
stw r5,_MSR(r4)
/* we've given up the altivec - clear the pointer */
li r3,0
lis r4,last_task_used_altivec@h
stw r3,last_task_used_altivec@l(r4)
#endif /* CONFIG_ALTIVEC */
blr
......
......@@ -268,3 +268,4 @@ EXPORT_SYMBOL(irq_desc);
void ppc_irq_dispatch_handler(struct pt_regs *, int);
EXPORT_SYMBOL(ppc_irq_dispatch_handler);
EXPORT_SYMBOL(decrementer_count);
EXPORT_SYMBOL(get_wchan);
......@@ -210,6 +210,11 @@ _switch_to(struct task_struct *prev, struct task_struct *new,
prev->last_processor = prev->processor;
current_set[smp_processor_id()] = new;
#endif /* __SMP__ */
/* Avoid the trap. On SMP this never happens since
* we don't set last_task_used_altivec -- Cort
*/
if ( last_task_used_altivec == new )
new->thread.regs->msr |= MSR_VEC;
new_thread = &new->thread;
old_thread = &current->thread;
*last = _switch(old_thread, new_thread);
......@@ -574,3 +579,32 @@ void __init ll_puts(const char *s)
orig_y = y;
}
#endif
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
unsigned long get_wchan(struct task_struct *p)
{
unsigned long ip, sp;
unsigned long stack_page = (unsigned long) p;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
sp = p->thread.ksp;
do {
sp = *(unsigned long *)sp;
if (sp < stack_page || sp >= stack_page + 8188)
return 0;
if (count > 0) {
ip = *(unsigned long *)(sp + 4);
if (ip < first_sched || ip >= last_sched)
return ip;
}
} while (count++ < 16);
return 0;
}
......@@ -149,19 +149,19 @@ AltiVecUnavailable(struct pt_regs *regs)
show_regs(regs);
panic("Kernel Used Altivec with MSR_VEC off!\n");
}
#ifdef __SMP__
printk("User Mode altivec trap should not happen in SMP!\n");
#else
if ( last_task_used_altivec != current )
{
if ( last_task_used_altivec )
giveup_altivec(current);
load_up_altivec(current);
/* on SMP we always save/restore on switch */
#ifndef __SMP__
last_task_used_altivec = current;
#endif
}
/* enable altivec for the task on return */
regs->msr |= MSR_VEC;
#endif
}
void
......
......@@ -702,3 +702,37 @@ pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
"g1", "g2", "g3", "o0", "o1", "memory", "cc");
return retval;
}
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
unsigned long get_wchan(struct task_struct *p)
{
unsigned long pc, fp, bias = 0;
unsigned long task_base = (unsigned long) p;
struct reg_window *rw;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
fp = p->thread.ksp + bias;
do {
/* Bogus frame pointer? */
if (fp < (task_base + sizeof(struct task_struct)) ||
fp >= (task_base + (2 * PAGE_SIZE)))
break;
rw = (struct reg_window *) fp;
pc = rw->ins[7];
if (pc < first_sched || pc >= last_sched)
return pc;
fp = rw->ins[6] + bias;
} while (++count < 16);
return 0;
}
#undef last_sched
#undef first_sched
......@@ -279,3 +279,5 @@ EXPORT_SYMBOL_DOT(mul);
EXPORT_SYMBOL_DOT(umul);
EXPORT_SYMBOL_DOT(div);
EXPORT_SYMBOL_DOT(udiv);
EXPORT_SYMBOL(get_wchan);
......@@ -802,3 +802,35 @@ asmlinkage int sparc_execve(struct pt_regs *regs)
unlock_kernel();
return error;
}
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
unsigned long get_wchan(struct task_struct *p)
{
unsigned long pc, fp, bias = 0;
unsigned long task_base = (unsigned long) p;
struct reg_window *rw;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
bias = STACK_BIAS;
fp = p->thread.ksp + bias;
do {
/* Bogus frame pointer? */
if (fp < (task_base + sizeof(struct task_struct)) ||
fp >= (task_base + (2 * PAGE_SIZE)))
break;
rw = (struct reg_window *) fp;
pc = rw->ins[7];
if (pc < first_sched || pc >= last_sched)
return pc;
fp = rw->ins[6] + bias;
} while (++count < 16);
return 0;
}
......@@ -311,3 +311,5 @@ EXPORT_SYMBOL_NOVERS(memcmp);
EXPORT_SYMBOL_NOVERS(memcpy);
EXPORT_SYMBOL_NOVERS(memset);
EXPORT_SYMBOL_NOVERS(memmove);
EXPORT_SYMBOL(get_wchan);
......@@ -42,6 +42,7 @@
#include <linux/vt_kern.h>
#include <linux/kbd_ll.h>
#include <linux/sysrq.h>
#include <linux/acpi.h>
#define SIZE(x) (sizeof(x)/sizeof((x)[0]))
......@@ -159,6 +160,8 @@ static int sysrq_pressed;
int sysrq_enabled = 1;
#endif
static struct acpi_dev *acpi_kbd = NULL;
/*
* Many other routines do put_queue, but I think either
* they produce ASCII, or they produce some user-assigned
......@@ -201,6 +204,8 @@ void handle_scancode(unsigned char scancode, int down)
char up_flag = down ? 0 : 0200;
char raw_mode;
acpi_access(acpi_kbd);
do_poke_blanked_console = 1;
mark_bh(CONSOLE_BH);
add_keyboard_randomness(scancode | up_flag);
......@@ -923,5 +928,8 @@ int __init kbd_init(void)
kbd_init_hw();
init_bh(KEYBOARD_BH, kbd_bh);
mark_bh(KEYBOARD_BH);
acpi_kbd = acpi_register(ACPI_SYS_DEV, 0, ACPI_KBC_HID, NULL);
return 0;
}
......@@ -3457,11 +3457,13 @@ static void autoconfig(struct serial_state * state)
}
if (state->type == PORT_16550A) {
/* Check for Oxford Semiconductor 16C950 */
unsigned char scratch4;
scratch = serial_icr_read(info, UART_ID1);
scratch2 = serial_icr_read(info, UART_ID2);
scratch4 = serial_icr_read(info, UART_ID2);
scratch3 = serial_icr_read(info, UART_ID3);
if (scratch == 0x16 && scratch2 == 0xC9 &&
if (scratch == 0x16 && scratch4 == 0xC9 &&
(scratch3 == 0x50 || scratch3 == 0x52 ||
scratch3 == 0x54)) {
state->type = PORT_16C950;
......
......@@ -4,6 +4,6 @@
mainmenu_option next_comment
comment 'Misc devices'
tristate 'Generic ACPI support' CONFIG_ACPI
bool 'ACPI support' CONFIG_ACPI
endmenu
......@@ -19,11 +19,7 @@ O_OBJS :=
OX_OBJS :=
ifeq ($(CONFIG_ACPI),y)
O_OBJS += acpi.o
else
ifeq ($(CONFIG_ACPI),m)
M_OBJS += acpi.o
endif
OX_OBJS += acpi.o
endif
include $(TOPDIR)/Rules.make
......
......@@ -54,6 +54,7 @@
#define DECLARE_WAIT_QUEUE_HEAD(x) struct wait_queue * x = NULL
#endif
static int acpi_idle_thread(void *context);
static int acpi_do_ulong(ctl_table *ctl,
int write,
struct file *file,
......@@ -70,6 +71,8 @@ static int acpi_do_event(ctl_table *ctl,
void *buffer,
size_t *len);
DECLARE_WAIT_QUEUE_HEAD(acpi_idle_wait);
static struct ctl_table_header *acpi_sysctl = NULL;
static struct acpi_facp *acpi_facp = NULL;
......@@ -84,6 +87,9 @@ static volatile u32 acpi_gpe_status = 0;
static volatile u32 acpi_gpe_level = 0;
static DECLARE_WAIT_QUEUE_HEAD(acpi_event_wait);
static spinlock_t acpi_devs_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(acpi_devs);
/* Make it impossible to enter L2/L3 until after we've initialized */
static unsigned long acpi_p_lvl2_lat = ~0UL;
static unsigned long acpi_p_lvl3_lat = ~0UL;
......@@ -907,6 +913,8 @@ static int acpi_do_event(ctl_table *ctl,
*/
static int __init acpi_init(void)
{
int pid;
if (acpi_find_tables() && acpi_find_piix4()) {
// no ACPI tables and not PIIX4
return -ENODEV;
......@@ -927,6 +935,10 @@ static int __init acpi_init(void)
acpi_claim_ioports(acpi_facp);
acpi_sysctl = register_sysctl_table(acpi_dir_table, 1);
pid = kernel_thread(acpi_idle_thread,
NULL,
CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
/*
* Set up the ACPI idle function. Note that we can't really
* do this with multiple CPU's, we'd need a per-CPU ACPI
......@@ -961,13 +973,81 @@ static void __exit acpi_exit(void)
acpi_destroy_tables();
}
#ifdef MODULE
/*
* Register a device with the ACPI subsystem
*/
struct acpi_dev* acpi_register(acpi_dev_t type,
unsigned long adr,
acpi_hid_t hid,
acpi_transition trans)
{
struct acpi_dev *dev = kmalloc(sizeof(struct acpi_dev), GFP_KERNEL);
if (dev) {
unsigned long flags;
memset(dev, 0, sizeof(*dev));
dev->type = type;
dev->adr = adr;
dev->hid = hid;
dev->transition = trans;
spin_lock_irqsave(&acpi_devs_lock, flags);
list_add(&dev->entry, &acpi_devs);
spin_unlock_irqrestore(&acpi_devs_lock, flags);
}
return dev;
}
/*
* Unregister a device with ACPI
*/
void acpi_unregister(struct acpi_dev *dev)
{
if (dev) {
unsigned long flags;
module_init(acpi_init)
module_exit(acpi_exit)
spin_lock_irqsave(&acpi_devs_lock, flags);
list_del(&dev->entry);
spin_unlock_irqrestore(&acpi_devs_lock, flags);
#else
kfree(dev);
}
}
/*
* Wake up a device
*/
void acpi_wakeup(struct acpi_dev *dev)
{
// run _PS0 or tell parent bus to wake device up
}
/*
* Manage idle devices
*/
static int acpi_idle_thread(void *context)
{
exit_mm(current);
exit_files(current);
strcpy(current->comm, "acpi");
for(;;) {
interruptible_sleep_on(&acpi_idle_wait);
if (signal_pending(current))
break;
// find all idle devices and set idle timer based on policy
}
return 0;
}
__initcall(acpi_init);
#endif
/*
* Module visible symbols
*/
EXPORT_SYMBOL(acpi_idle_wait);
EXPORT_SYMBOL(acpi_register);
EXPORT_SYMBOL(acpi_unregister);
EXPORT_SYMBOL(acpi_wakeup);
......@@ -3096,7 +3096,7 @@
5000 NV5000SC
4b10 Buslogic Inc.
4c48 LUNG HWA Electronics
4d51 MEDIAQ Inc.
4d51 MediaQ Inc.
0200 MQ-200
4ddc ILC Data Device Corp
5053 Voyetra Technologies
......
......@@ -156,7 +156,7 @@ static int uhci_td_result(struct uhci_device *dev, struct uhci_td *td, unsigned
if (rval)
*rval += actlength;
if (explength != actlength) {
if (explength != actlength && tmp->pipetype == PIPE_BULK) {
/* If the packet is short, none of the */
/* packets after this were processed, so */
/* fix the DT accordingly */
......
......@@ -720,7 +720,7 @@ static int usb_parse_configuration(struct usb_device *dev, struct usb_config_des
}
memset(config->interface, 0,
config->bNumInterfaces*sizeof(struct usb_interface_descriptor));
config->bNumInterfaces * sizeof(struct usb_interface));
buffer += config->bLength;
size -= config->bLength;
......
......@@ -405,14 +405,13 @@ ncp_d_validate(struct dentry *dentry)
{
unsigned long dent_addr = (unsigned long) dentry;
unsigned long min_addr = PAGE_OFFSET;
unsigned long max_addr = min_addr + (max_mapnr << PAGE_SHIFT);
unsigned long align_mask = 0x0F;
unsigned int len;
int valid = 0;
if (dent_addr < min_addr)
goto bad_addr;
if (dent_addr > max_addr - sizeof(struct dentry))
if (dent_addr > (unsigned long)high_memory - sizeof(struct dentry))
goto bad_addr;
if ((dent_addr & ~align_mask) != dent_addr)
goto bad_align;
......
......@@ -46,6 +46,10 @@
*
* Gerhard Wichert : added BIGMEM support
* Siemens AG <Gerhard.Wichert@pdb.siemens.de>
*
* Al Viro & Jeff Garzik : moved most of the thing into base.c and
* : proc_misc.c. The rest may eventually go into
* : base.c too.
*/
#include <linux/types.h>
......
......@@ -4,6 +4,13 @@
* Copyright (C) 1991, 1992 Linus Torvalds
*
* proc base directory handling functions
*
* 1999, Al Viro. Rewritten. Now it covers the whole per-process part.
* Instead of using magical inumbers to determine the kind of object
* we allocate and fill in-core inodes upon lookup. They don't even
* go into icache. We cache the reference to task_struct upon lookup too.
* Eventually it should become a filesystem in its own right. We don't use the
* rest of procfs anymore.
*/
#include <asm/uaccess.h>
......@@ -16,6 +23,14 @@
#include <linux/init.h>
#include <linux/file.h>
/*
* For hysterical raisins we keep the same inumbers as in the old procfs.
* Feel free to change the macro below - just keep the range distinct from
* inumbers of the rest of procfs (currently those are in 0x0000--0xffff).
* As soon as we get a separate superblock we will be able to forget
* about magical ranges too.
*/
#define fake_ino(pid,ino) (((pid)<<16)|(ino))
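/*
 * Worked example (illustrative values, not from this patch):
 * fake_ino(123, PROC_PID_INO) == (123 << 16) | 2 == 0x007b0002,
 * comfortably outside the 0x0000-0xffff range used by the rest of procfs.
 */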
ssize_t proc_pid_read_maps(struct task_struct*,struct file*,char*,size_t,loff_t*);
......@@ -241,7 +256,7 @@ static struct file_operations proc_info_file_operations = {
proc_info_read, /* read */
};
struct inode_operations proc_info_inode_operations = {
static struct inode_operations proc_info_inode_operations = {
&proc_info_file_operations, /* default proc file-ops */
};
......@@ -466,6 +481,26 @@ struct pid_entry {
mode_t mode;
};
enum pid_directory_inos {
PROC_PID_INO = 2,
PROC_PID_STATUS,
PROC_PID_MEM,
PROC_PID_CWD,
PROC_PID_ROOT,
PROC_PID_EXE,
PROC_PID_FD,
PROC_PID_ENVIRON,
PROC_PID_CMDLINE,
PROC_PID_STAT,
PROC_PID_STATM,
PROC_PID_MAPS,
#if CONFIG_AP1000
PROC_PID_RINGBUF,
#endif
PROC_PID_CPU,
PROC_PID_FD_DIR = 0x8000, /* 0x8000-0xffff */
};
#define E(type,name,mode) {(type),sizeof(name)-1,(name),(mode)}
static struct pid_entry base_stuff[] = {
E(PROC_PID_FD, "fd", S_IFDIR|S_IRUSR|S_IXUSR),
......@@ -582,7 +617,7 @@ static int proc_base_readdir(struct file * filp,
/* building an inode */
static struct inode *proc_pid_get_inode(struct super_block * sb, struct task_struct *task, int ino)
static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task, int ino)
{
struct inode * inode;
......@@ -694,7 +729,7 @@ static struct dentry *proc_lookupfd(struct inode * dir, struct dentry * dentry)
goto out;
}
inode = proc_pid_get_inode(dir->i_sb, task, PROC_PID_FD_DIR+fd);
inode = proc_pid_make_inode(dir->i_sb, task, PROC_PID_FD_DIR+fd);
if (!inode)
goto out;
/* FIXME */
......@@ -778,7 +813,7 @@ static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry)
goto out;
error = -EINVAL;
inode = proc_pid_get_inode(dir->i_sb, task, p->type);
inode = proc_pid_make_inode(dir->i_sb, task, p->type);
if (!inode)
goto out;
......@@ -899,7 +934,7 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry)
if (!task)
goto out;
inode = proc_pid_get_inode(dir->i_sb, task, PROC_PID_INO);
inode = proc_pid_make_inode(dir->i_sb, task, PROC_PID_INO);
free_task_struct(task);
......@@ -923,3 +958,60 @@ void proc_pid_delete_inode(struct inode *inode)
fput(inode->u.proc_i.file);
free_task_struct(inode->u.proc_i.task);
}
#define PROC_NUMBUF 10
#define PROC_MAXPIDS 20
/*
* Get a few pid's to return for filldir - we need to hold the
* tasklist lock while doing this, and we must release it before
* we actually do the filldir itself, so we use a temp buffer..
*/
static int get_pid_list(int index, unsigned int *pids)
{
struct task_struct *p;
int nr_pids = 0;
index -= FIRST_PROCESS_ENTRY;
read_lock(&tasklist_lock);
for_each_task(p) {
int pid = p->pid;
if (!pid)
continue;
if (--index >= 0)
continue;
pids[nr_pids] = pid;
nr_pids++;
if (nr_pids >= PROC_MAXPIDS)
break;
}
read_unlock(&tasklist_lock);
return nr_pids;
}
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
unsigned int pid_array[PROC_MAXPIDS];
char buf[PROC_NUMBUF];
unsigned int nr = filp->f_pos;
unsigned int nr_pids, i;
nr_pids = get_pid_list(nr, pid_array);
for (i = 0; i < nr_pids; i++) {
int pid = pid_array[i];
ino_t ino = fake_ino(pid,PROC_PID_INO);
unsigned long j = PROC_NUMBUF;
do {
j--;
buf[j] = '0' + (pid % 10);
pid /= 10;
} while (pid);
if (filldir(dirent, buf+j, PROC_NUMBUF-j, filp->f_pos, ino) < 0)
break;
filp->f_pos++;
}
return 0;
}
......@@ -23,7 +23,7 @@
ssize_t read_kcore(struct file * file, char * buf,
size_t count, loff_t *ppos)
{
unsigned long p = *ppos, memsize;
unsigned long long p = *ppos, memsize;
ssize_t read;
ssize_t count1;
char * pnt;
......
......@@ -22,11 +22,6 @@
#include <linux/zorro.h>
#endif
/*
* Offset of the first process in the /proc root directory..
*/
#define FIRST_PROCESS_ENTRY 256
static int proc_root_readdir(struct file *, void *, filldir_t);
static struct dentry *proc_root_lookup(struct inode *,struct dentry *);
static int proc_unlink(struct inode *, struct dentry *);
......@@ -46,13 +41,6 @@ static struct file_operations proc_dir_operations = {
NULL, /* read - bad */
NULL, /* write - bad */
proc_readdir, /* readdir */
NULL, /* poll - default */
NULL, /* ioctl - default */
NULL, /* mmap */
NULL, /* no special open code */
NULL, /* flush */
NULL, /* no special release code */
NULL /* can't fsync */
};
/*
......@@ -62,23 +50,6 @@ struct inode_operations proc_dir_inode_operations = {
&proc_dir_operations, /* default net directory file-ops */
NULL, /* create */
proc_lookup, /* lookup */
NULL, /* link */
NULL, /* unlink */
NULL, /* symlink */
NULL, /* mkdir */
NULL, /* rmdir */
NULL, /* mknod */
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
NULL, /* get_block */
NULL, /* readpage */
NULL, /* writepage */
NULL, /* flushpage */
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
NULL /* revalidate */
};
/*
......@@ -90,21 +61,6 @@ struct inode_operations proc_dyna_dir_inode_operations = {
proc_lookup, /* lookup */
NULL, /* link */
proc_unlink, /* unlink(struct inode *, struct dentry *) */
NULL, /* symlink */
NULL, /* mkdir */
NULL, /* rmdir */
NULL, /* mknod */
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
NULL, /* get_block */
NULL, /* readpage */
NULL, /* writepage */
NULL, /* flushpage */
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
NULL /* revalidate */
};
/*
......@@ -117,13 +73,6 @@ static struct file_operations proc_root_operations = {
NULL, /* read - bad */
NULL, /* write - bad */
proc_root_readdir, /* readdir */
NULL, /* poll - default */
NULL, /* ioctl - default */
NULL, /* mmap */
NULL, /* no special open code */
NULL, /* flush */
NULL, /* no special release code */
NULL /* no fsync */
};
/*
......@@ -133,23 +82,6 @@ static struct inode_operations proc_root_inode_operations = {
&proc_root_operations, /* default base directory file-ops */
NULL, /* create */
proc_root_lookup, /* lookup */
NULL, /* link */
NULL, /* unlink */
NULL, /* symlink */
NULL, /* mkdir */
NULL, /* rmdir */
NULL, /* mknod */
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
NULL, /* get_block */
NULL, /* readpage */
NULL, /* writepage */
NULL, /* flushpage */
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
NULL /* revalidate */
};
/*
......@@ -268,36 +200,12 @@ static struct file_operations proc_openprom_operations = {
NULL, /* read - bad */
NULL, /* write - bad */
OPENPROM_DEFREADDIR, /* readdir */
NULL, /* poll - default */
NULL, /* ioctl - default */
NULL, /* mmap */
NULL, /* no special open code */
NULL, /* flush */
NULL, /* no special release code */
NULL /* can't fsync */
};
struct inode_operations proc_openprom_inode_operations = {
&proc_openprom_operations,/* default net directory file-ops */
NULL, /* create */
OPENPROM_DEFLOOKUP, /* lookup */
NULL, /* link */
NULL, /* unlink */
NULL, /* symlink */
NULL, /* mkdir */
NULL, /* rmdir */
NULL, /* mknod */
NULL, /* rename */
NULL, /* readlink */
NULL, /* follow_link */
NULL, /* get_block */
NULL, /* readpage */
NULL, /* writepage */
NULL, /* flushpage */
NULL, /* truncate */
NULL, /* permission */
NULL, /* smap */
NULL /* revalidate */
};
struct proc_dir_entry proc_openprom = {
......@@ -612,7 +520,7 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry)
if (de->namelen != dentry->d_name.len)
continue;
if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
int ino = de->low_ino | (dir->i_ino & ~(0xffff));
int ino = de->low_ino;
error = -EINVAL;
inode = proc_get_inode(dir->i_sb, ino, de);
break;
......@@ -700,7 +608,6 @@ int proc_readdir(struct file * filp,
filp->f_pos++;
/* fall through */
default:
ino &= ~0xffff;
de = de->subdir;
i -= 2;
for (;;) {
......@@ -713,7 +620,7 @@ int proc_readdir(struct file * filp,
}
do {
if (filldir(dirent, de->name, de->namelen, filp->f_pos, ino | de->low_ino) < 0)
if (filldir(dirent, de->name, de->namelen, filp->f_pos, de->low_ino) < 0)
return 0;
filp->f_pos++;
de = de->next;
......@@ -722,69 +629,19 @@ int proc_readdir(struct file * filp,
return 1;
}
#define PROC_NUMBUF 10
#define PROC_MAXPIDS 20
/*
* Get a few pid's to return for filldir - we need to hold the
* tasklist lock while doing this, and we must release it before
* we actually do the filldir itself, so we use a temp buffer..
*/
static int get_pid_list(int index, unsigned int *pids)
{
struct task_struct *p;
int nr_pids = 0;
index -= FIRST_PROCESS_ENTRY;
read_lock(&tasklist_lock);
for_each_task(p) {
int pid = p->pid;
if (!pid)
continue;
if (--index >= 0)
continue;
pids[nr_pids] = pid;
nr_pids++;
if (nr_pids >= PROC_MAXPIDS)
break;
}
read_unlock(&tasklist_lock);
return nr_pids;
}
static int proc_root_readdir(struct file * filp,
void * dirent, filldir_t filldir)
{
unsigned int pid_array[PROC_MAXPIDS];
char buf[PROC_NUMBUF];
unsigned int nr = filp->f_pos;
unsigned int nr_pids, i;
if (nr < FIRST_PROCESS_ENTRY) {
int error = proc_readdir(filp, dirent, filldir);
if (error <= 0)
return error;
filp->f_pos = nr = FIRST_PROCESS_ENTRY;
filp->f_pos = FIRST_PROCESS_ENTRY;
}
nr_pids = get_pid_list(nr, pid_array);
for (i = 0; i < nr_pids; i++) {
int pid = pid_array[i];
ino_t ino = (pid << 16) + PROC_PID_INO;
unsigned long j = PROC_NUMBUF;
do {
j--;
buf[j] = '0' + (pid % 10);
pid /= 10;
} while (pid);
if (filldir(dirent, buf+j, PROC_NUMBUF-j, filp->f_pos, ino) < 0)
break;
filp->f_pos++;
}
return 0;
return proc_pid_readdir(filp, dirent, filldir);
}
static int proc_unlink(struct inode *dir, struct dentry *dentry)
......
#ifndef __ALPHA_DIV64
#define __ALPHA_DIV64
/*
* Hey, we're already 64-bit, no
* need to play games..
*/
#define do_div(n,base) ({ \
int __res; \
__res = ((unsigned long) n) % (unsigned) base; \
n = ((unsigned long) n) / (unsigned) base; \
__res; })
#endif
......@@ -126,40 +126,7 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
#define release_segments(mm) do { } while (0)
#define forget_segments() do { } while (0)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
static inline unsigned long get_wchan(struct task_struct *p)
{
unsigned long schedule_frame;
unsigned long pc;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
/*
* This one depends on the frame size of schedule(). Do a
* "disass schedule" in gdb to find the frame size. Also, the
* code assumes that sleep_on() follows immediately after
* interruptible_sleep_on() and that add_timer() follows
* immediately after interruptible_sleep(). Ugly, isn't it?
* Maybe adding a wchan field to task_struct would be better,
* after all...
*/
pc = thread_saved_pc(&p->thread);
if (pc >= first_sched && pc < last_sched) {
schedule_frame = ((unsigned long *)p->thread.ksp)[6];
return ((unsigned long *)schedule_frame)[12];
}
return pc;
}
#undef last_sched
#undef first_sched
unsigned long get_wchan(struct task_struct *p);
/*
* See arch/alpha/kernel/ptrace.c for details.
*/
......
......@@ -107,36 +107,7 @@ extern void release_thread(struct task_struct *);
#define release_segments(mm) do { } while (0)
#define forget_segments() do { } while (0)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
static inline unsigned long get_wchan(struct task_struct *p)
{
unsigned long fp, lr;
unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = 4096 + (unsigned long)p;
fp = get_css_fp(&p->thread);
do {
if (fp < stack_page || fp > 4092+stack_page)
return 0;
lr = pc_pointer (((unsigned long *)fp)[-1]);
if (lr < first_sched || lr > last_sched)
return lr;
fp = *(unsigned long *) (fp - 12);
} while (count ++ < 16);
return 0;
}
#undef last_sched
#undef first_sched
unsigned long get_wchan(struct task_struct *p);
#ifdef CONFIG_CPU_26
# define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1022])
......
#ifndef __I386_DIV64
#define __I386_DIV64
#define do_div(n,base) ({ \
unsigned long __upper, __low, __high, __mod; \
asm("":"=a" (__low), "=d" (__high):"A" (n)); \
__upper = __high; \
if (__high) { \
__upper = __high % (base); \
__high = __high / (base); \
} \
asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (base), "0" (__low), "1" (__upper)); \
asm("":"=A" (n):"a" (__low),"d" (__high)); \
__mod; \
})
#endif
......@@ -134,38 +134,7 @@ extern inline unsigned long thread_saved_pc(struct thread_struct *t)
return sw->retpc;
}
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
static inline unsigned long get_wchan(struct task_struct *p)
{
unsigned long fp, pc;
unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = (unsigned long)p;
fp = ((struct switch_stack *)p->thread.ksp)->a6;
do {
if (fp < stack_page+sizeof(struct task_struct) ||
fp >= 8184+stack_page)
return 0;
pc = ((unsigned long *)fp)[1];
/* FIXME: This depends on the order of these functions. */
if (pc < first_sched || pc >= last_sched)
return pc;
fp = *(unsigned long *) fp;
} while (count++ < 16);
return 0;
}
#undef last_sched
#undef first_sched
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) \
({ \
......
......@@ -206,32 +206,7 @@ extern inline unsigned long thread_saved_pc(struct thread_struct *t)
*/
extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp);
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
static inline unsigned long get_wchan(struct task_struct *p)
{
unsigned long schedule_frame;
unsigned long pc;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
/*
* The same comment as on the Alpha applies here, too ...
*/
pc = thread_saved_pc(&p->tss);
if (pc >= (unsigned long) interruptible_sleep_on && pc < (unsigned long) add_timer) {
schedule_frame = ((unsigned long *)(long)p->tss.reg30)[16];
return (unsigned long)((unsigned long *)schedule_frame)[11];
}
return pc;
}
#undef last_sched
#undef first_sched
unsigned long get_wchan(struct task_struct *p);
#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg \
- sizeof(struct pt_regs))
......
......@@ -316,36 +316,7 @@ static inline unsigned long thread_saved_pc(struct thread_struct *t)
#define release_segments(mm) do { } while (0)
#define forget_segments() do { } while (0)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
static inline unsigned long get_wchan(struct task_struct *p)
{
unsigned long ip, sp;
unsigned long stack_page = (unsigned long) p;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
sp = p->thread.ksp;
do {
sp = *(unsigned long *)sp;
if (sp < stack_page || sp >= stack_page + 8188)
return 0;
if (count > 0) {
ip = *(unsigned long *)(sp + 4);
if (ip < first_sched || ip >= last_sched)
return ip;
}
} while (count++ < 16);
return 0;
}
#undef last_sched
#undef first_sched
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.regs->nip)
#define KSTK_ESP(tsk) ((tsk)->thread.regs->gpr[1])
......
......@@ -161,39 +161,7 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#define release_segments(mm) do { } while (0)
#define forget_segments() do { } while (0)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
static inline unsigned long get_wchan(struct task_struct *p)
{
unsigned long pc, fp, bias = 0;
unsigned long task_base = (unsigned long) p;
struct reg_window *rw;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
fp = p->thread.ksp + bias;
do {
/* Bogus frame pointer? */
if (fp < (task_base + sizeof(struct task_struct)) ||
fp >= (task_base + (2 * PAGE_SIZE)))
break;
rw = (struct reg_window *) fp;
pc = rw->ins[7];
if (pc < first_sched || pc >= last_sched)
return pc;
fp = rw->ins[6] + bias;
} while (++count < 16);
return 0;
}
#undef last_sched
#undef first_sched
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.kregs->pc)
#define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP])
......
......@@ -209,39 +209,7 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#define release_segments(mm) do { } while (0)
#define forget_segments() do { } while (0)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
static inline unsigned long get_wchan(struct task_struct *p)
{
unsigned long pc, fp, bias = 0;
unsigned long task_base = (unsigned long) p;
struct reg_window *rw;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
bias = STACK_BIAS;
fp = p->thread.ksp + bias;
do {
/* Bogus frame pointer? */
if (fp < (task_base + sizeof(struct task_struct)) ||
fp >= (task_base + (2 * PAGE_SIZE)))
break;
rw = (struct reg_window *) fp;
pc = rw->ins[7];
if (pc < first_sched || pc >= last_sched)
return pc;
fp = rw->ins[6] + bias;
} while (++count < 16);
return 0;
}
#undef last_sched
#undef first_sched
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.kregs->tpc)
#define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP])
......
......@@ -24,6 +24,147 @@
#include <linux/types.h>
#include <linux/ioctl.h>
#ifdef __KERNEL__
#include <linux/sched.h>
#include <linux/wait.h>
/*
* Device types
*/
enum
{
ACPI_SYS_DEV, /* system device (fan, KB controller, ...) */
ACPI_PCI_DEV, /* generic PCI device */
ACPI_PCI_BUS, /* PCI bus */
ACPI_ISA_DEV, /* generic ISA device */
ACPI_ISA_BUS, /* ISA bus */
ACPI_USB_DEV, /* generic USB device */
ACPI_USB_HUB, /* USB hub device */
ACPI_USB_CTRL, /* USB controller */
ACPI_SCSI_DEV, /* generic SCSI device */
ACPI_SCSI_CTRL, /* SCSI controller */
};
typedef int acpi_dev_t;
/*
* Device addresses
*/
#define ACPI_PCI_ADR(dev) ((dev)->bus->number << 16 | (dev)->devfn)
/*
* HID (PnP) values
*/
enum
{
ACPI_UNKNOWN_HID = 0x00000000, /* generic */
ACPI_KBC_HID = 0x41d00303, /* keyboard controller */
ACPI_COM_HID = 0x41d00500, /* serial port */
ACPI_FDC_HID = 0x41d00700, /* floppy controller */
ACPI_VGA_HID = 0x41d00900, /* VGA controller */
ACPI_ISA_HID = 0x41d00a00, /* ISA bus */
ACPI_EISA_HID = 0x41d00a01, /* EISA bus */
ACPI_PCI_HID = 0x41d00a03, /* PCI bus */
};
typedef int acpi_hid_t;
/*
* Device states
*/
enum
{
ACPI_D0, /* fully-on */
ACPI_D1, /* partial-on */
ACPI_D2, /* partial-on */
ACPI_D3, /* fully-off */
};
typedef int acpi_dstate_t;
struct acpi_dev;
/*
* Device state transition function
*/
typedef int (*acpi_transition)(struct acpi_dev *dev, acpi_dstate_t state);
/*
* ACPI device information
*/
struct acpi_dev
{
acpi_dev_t type; /* device type */
unsigned long adr; /* bus address or unique id */
acpi_hid_t hid; /* P&P identifier */
acpi_transition transition; /* state transition callback */
acpi_dstate_t state; /* current D-state */
unsigned long accessed; /* last access time */
unsigned long idle; /* last idle time */
struct list_head entry; /* linked list entry */
};
#ifdef CONFIG_ACPI
extern wait_queue_head_t acpi_idle_wait;
/*
* Register a device with the ACPI subsystem
*/
struct acpi_dev *acpi_register(acpi_dev_t type,
unsigned long adr,
acpi_hid_t hid,
acpi_transition trans);
/*
* Unregister a device with ACPI
*/
void acpi_unregister(struct acpi_dev *dev);
/*
* Update device access time and wake up device, if necessary
*/
extern inline void acpi_access(struct acpi_dev *dev)
{
extern void acpi_wakeup(struct acpi_dev *dev);
if (dev->state != ACPI_D0)
acpi_wakeup(dev);
dev->accessed = jiffies;
}
/*
* Identify device as currently being idle
*/
extern inline void acpi_dev_idle(struct acpi_dev *dev)
{
dev->idle = jiffies;
if (waitqueue_active(&acpi_idle_wait))
wake_up(&acpi_idle_wait);
}
#else /* CONFIG_ACPI */
extern inline struct acpi_dev*
acpi_register(acpi_dev_t type,
unsigned long adr,
acpi_hid_t hid,
acpi_transition trans)
{
return 0;
}
extern inline void acpi_unregister(struct acpi_dev *dev) {}
extern inline void acpi_access(struct acpi_dev *dev) {}
extern inline void acpi_dev_idle(struct acpi_dev *dev) {}
#endif /* CONFIG_ACPI */
extern void (*acpi_idle)(void);
extern void (*acpi_power_off)(void);
#endif /* __KERNEL__ */
/* RSDP location */
#define ACPI_BIOS_ROM_BASE (0x0e0000)
#define ACPI_BIOS_ROM_END (0x100000)
......@@ -181,7 +322,8 @@ enum
ACPI_P_LVL3,
ACPI_P_LVL2_LAT,
ACPI_P_LVL3_LAT,
ACPI_S5_SLP_TYP
ACPI_S5_SLP_TYP,
ACPI_KBD,
};
#define ACPI_P_LVL_DISABLED 0x80
......@@ -224,11 +366,4 @@ enum
#define ACPI_PIIX4_PMREGMISC 0x80
#define ACPI_PIIX4_PMIOSE 0x01
#ifdef __KERNEL__
extern void (*acpi_idle)(void);
extern void (*acpi_power_off)(void);
#endif
#endif /* _LINUX_ACPI_H */
......@@ -8,6 +8,12 @@
* The proc filesystem constants/structures
*/
/*
* Offset of the first process in the /proc root directory..
*/
#define FIRST_PROCESS_ENTRY 256
/*
* We always define these enumerators
*/
......@@ -16,33 +22,6 @@ enum {
PROC_ROOT_INO = 1,
};
enum pid_directory_inos {
PROC_PID_INO = 2,
PROC_PID_STATUS,
PROC_PID_MEM,
PROC_PID_CWD,
PROC_PID_ROOT,
PROC_PID_EXE,
PROC_PID_FD,
PROC_PID_ENVIRON,
PROC_PID_CMDLINE,
PROC_PID_STAT,
PROC_PID_STATM,
PROC_PID_MAPS,
#if CONFIG_AP1000
PROC_PID_RINGBUF,
#endif
PROC_PID_CPU,
};
enum pid_subdirectory_inos {
PROC_PID_FD_DIR = 0x8000, /* 0x8000-0xffff */
};
enum net_directory_inos {
PROC_NET_LAST
};
enum scsi_directory_inos {
PROC_SCSI_SCSI = 256,
PROC_SCSI_ADVANSYS,
......@@ -203,9 +182,12 @@ extern struct proc_dir_entry proc_root_kcore;
extern struct inode_operations proc_scsi_inode_operations;
extern void proc_root_init(void);
extern void proc_base_init(void);
extern void proc_misc_init(void);
struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry);
void proc_pid_delete_inode(struct inode *inode);
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir);
extern int proc_register(struct proc_dir_entry *, struct proc_dir_entry *);
extern int proc_unregister(struct proc_dir_entry *, int);
......@@ -335,8 +317,6 @@ extern struct inode_operations proc_netdir_inode_operations;
extern struct inode_operations proc_openprom_inode_operations;
extern struct inode_operations proc_mem_inode_operations;
extern struct inode_operations proc_sys_inode_operations;
extern struct inode_operations proc_array_inode_operations;
extern struct inode_operations proc_arraylong_inode_operations;
extern struct inode_operations proc_kcore_inode_operations;
extern struct inode_operations proc_profile_inode_operations;
extern struct inode_operations proc_kmsg_inode_operations;
......
......@@ -14,6 +14,8 @@
#include <linux/string.h>
#include <linux/ctype.h>
#include <asm/div64.h>
unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base)
{
unsigned long result = 0,value;
......@@ -66,14 +68,7 @@ static int skip_atoi(const char **s)
#define SPECIAL 32 /* 0x */
#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
#define do_div(n,base) ({ \
int __res; \
__res = ((unsigned long) n) % (unsigned) base; \
n = ((unsigned long) n) / (unsigned) base; \
__res; })
static char * number(char * str, long num, int base, int size, int precision
,int type)
static char * number(char * str, long long num, int base, int size, int precision, int type)
{
char c,sign,tmp[66];
const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
......@@ -145,7 +140,7 @@ int sprintf(char * buf, const char *fmt, ...);
int vsprintf(char *buf, const char *fmt, va_list args)
{
int len;
unsigned long num;
unsigned long long num;
int i, base;
char * str;
const char *s;
......@@ -295,18 +290,23 @@ int vsprintf(char *buf, const char *fmt, va_list args)
--fmt;
continue;
}
if (qualifier == 'l')
if (qualifier == 'L')
num = va_arg(args, long long);
else if (qualifier == 'l') {
num = va_arg(args, unsigned long);
else if (qualifier == 'z')
if (flags & SIGN)
num = (signed long) num;
} else if (qualifier == 'z') {
num = va_arg(args, size_t);
else if (qualifier == 'h') {
} else if (qualifier == 'h') {
num = (unsigned short) va_arg(args, int);
if (flags & SIGN)
num = (short) num;
} else if (flags & SIGN)
num = va_arg(args, int);
else
num = (signed short) num;
} else {
num = va_arg(args, unsigned int);
if (flags & SIGN)
num = (signed int) num;
}
str = number(str, num, base, field_width, precision, flags);
}
*str = '\0';
......
......@@ -204,7 +204,7 @@ void * vmalloc(unsigned long size)
struct vm_struct *area;
size = PAGE_ALIGN(size);
if (!size || size > (max_mapnr << PAGE_SHIFT)) {
if (!size || (size >> PAGE_SHIFT) > max_mapnr) {
BUG();
return NULL;
}
......