Commit 6e67e940 authored by Steve French

Merge bk://linux.bkbits.net/linux-2.5

into hostme.bitkeeper.com:/repos/c/cifs/linux-2.5cifs
parents 840d77a4 deaeb66a
@@ -103,7 +103,7 @@ static int valid_stack_ptr(struct task_struct *task, void *p)
 }
 
 #ifdef CONFIG_FRAME_POINTER
-void print_context_stack(struct task_struct *task, unsigned long *stack,
+static void print_context_stack(struct task_struct *task, unsigned long *stack,
 			 unsigned long ebp)
 {
 	unsigned long addr;
@@ -117,7 +117,7 @@ void print_context_stack(struct task_struct *task, unsigned long *stack,
 	}
 }
 #else
-void print_context_stack(struct task_struct *task, unsigned long *stack,
+static void print_context_stack(struct task_struct *task, unsigned long *stack,
 			 unsigned long ebp)
 {
 	unsigned long addr;
@@ -125,8 +125,9 @@ void print_context_stack(struct task_struct *task, unsigned long *stack,
 	while (!kstack_end(stack)) {
 		addr = *stack++;
 		if (__kernel_text_address(addr)) {
-			printk(" [<%08lx>] ", addr);
-			print_symbol("%s\n", addr);
+			printk(" [<%08lx>]", addr);
+			print_symbol(" %s", addr);
+			printk("\n");
 		}
 	}
 }
...
@@ -261,7 +261,27 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 
-	down_read(&mm->mmap_sem);
+	/* When running in the kernel we expect faults to occur only to
+	 * addresses in user space.  All other faults represent errors in the
+	 * kernel and should generate an OOPS.  Unfortunately, in the case of
+	 * an erroneous fault occurring in a code path which already holds
+	 * mmap_sem we will deadlock attempting to validate the fault against
+	 * the address space.  Luckily the kernel only validly references user
+	 * space from well defined areas of code, which are listed in the
+	 * exceptions table.
+	 *
+	 * As the vast majority of faults will be valid we will only perform
+	 * the source reference check when there is a possibility of a
+	 * deadlock.  Attempt to lock the address space, if we cannot we then
+	 * validate the source.  If this is invalid we can skip the address
+	 * space check, thus avoiding the deadlock.
+	 */
+	if (!down_read_trylock(&mm->mmap_sem)) {
+		if ((error_code & 4) == 0 &&
+		    !search_exception_tables(regs->eip))
+			goto bad_area_nosemaphore;
+		down_read(&mm->mmap_sem);
+	}
 
 	vma = find_vma(mm, address);
 	if (!vma)
...
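Editor's note: the hunk above is a general trylock-or-validate idiom worth seeing in isolation: take the lock opportunistically, and only on contention pay for the expensive caller check. A minimal hedged sketch, where `resource_lock` and `caller_is_whitelisted()` are hypothetical names, not the kernel's fault-handler API:

	static DECLARE_RWSEM(resource_lock);		/* hypothetical lock */

	static int access_resource(void)
	{
		/* Fast path: uncontended, just take the lock. */
		if (!down_read_trylock(&resource_lock)) {
			/* Slow path only: pay for the validity check. */
			if (!caller_is_whitelisted())	/* hypothetical predicate */
				return -EFAULT;	/* refuse rather than deadlock */
			down_read(&resource_lock); /* caller proven not to hold it */
		}
		/* ... use the resource ... */
		up_read(&resource_lock);
		return 0;
	}
...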
@@ -654,13 +654,13 @@ extern atomic_t dcpage_flushes_xcall;
 static __inline__ void __local_flush_dcache_page(struct page *page)
 {
 #if (L1DCACHE_SIZE > PAGE_SIZE)
-	__flush_dcache_page(page->virtual,
+	__flush_dcache_page(page_address(page),
 			    ((tlb_type == spitfire) &&
 			     page_mapping(page) != NULL));
 #else
 	if (page_mapping(page) != NULL &&
 	    tlb_type == spitfire)
-		__flush_icache_page(__pa(page->virtual));
+		__flush_icache_page(__pa(page_address(page)));
 #endif
 }
@@ -675,6 +675,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 	if (cpu == this_cpu) {
 		__local_flush_dcache_page(page);
 	} else if (cpu_online(cpu)) {
+		void *pg_addr = page_address(page);
 		u64 data0;
 
 		if (tlb_type == spitfire) {
@@ -683,14 +684,14 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 			if (page_mapping(page) != NULL)
 				data0 |= ((u64)1 << 32);
 			spitfire_xcall_deliver(data0,
-					       __pa(page->virtual),
-					       (u64) page->virtual,
+					       __pa(pg_addr),
+					       (u64) pg_addr,
 					       mask);
 		} else {
 			data0 =
 				((u64)&xcall_flush_dcache_page_cheetah);
 			cheetah_xcall_deliver(data0,
-					      __pa(page->virtual),
+					      __pa(pg_addr),
 					      0, mask);
 		}
 #ifdef CONFIG_DEBUG_DCFLUSH
@@ -703,6 +704,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 {
+	void *pg_addr = page_address(page);
 	cpumask_t mask = cpu_online_map;
 	u64 data0;
 	int this_cpu = get_cpu();
@@ -719,13 +721,13 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 		if (page_mapping(page) != NULL)
 			data0 |= ((u64)1 << 32);
 		spitfire_xcall_deliver(data0,
-				       __pa(page->virtual),
-				       (u64) page->virtual,
+				       __pa(pg_addr),
+				       (u64) pg_addr,
 				       mask);
 	} else {
 		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
 		cheetah_xcall_deliver(data0,
-				      __pa(page->virtual),
+				      __pa(pg_addr),
 				      0, mask);
 	}
 #ifdef CONFIG_DEBUG_DCFLUSH
...
@@ -360,6 +360,8 @@ EXPORT_SYMBOL(__bzero_noasi);
 EXPORT_SYMBOL(phys_base);
 EXPORT_SYMBOL(pfn_base);
 EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
+EXPORT_SYMBOL(page_to_pfn);
+EXPORT_SYMBOL(pfn_to_page);
 
 /* No version information on this, heavily used in inline asm,
  * and will always be 'void __ret_efault(void)'.
...
@@ -137,13 +137,13 @@ __inline__ void flush_dcache_page_impl(struct page *page)
 #endif
 
 #if (L1DCACHE_SIZE > PAGE_SIZE)
-	__flush_dcache_page(page->virtual,
+	__flush_dcache_page(page_address(page),
 			    ((tlb_type == spitfire) &&
 			     page_mapping(page) != NULL));
 #else
 	if (page_mapping(page) != NULL &&
 	    tlb_type == spitfire)
-		__flush_icache_page(__pa(page->virtual));
+		__flush_icache_page(__pa(page_address(page)));
 #endif
 }
@@ -344,6 +344,16 @@ void flush_icache_range(unsigned long start, unsigned long end)
 	}
 }
 
+unsigned long page_to_pfn(struct page *page)
+{
+	return (unsigned long) ((page - mem_map) + pfn_base);
+}
+
+struct page *pfn_to_page(unsigned long pfn)
+{
+	return (mem_map + (pfn - pfn_base));
+}
+
 void show_mem(void)
 {
 	printk("Mem-info:\n");
...
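Editor's note: turning these helpers into real functions pairs with the two EXPORT_SYMBOL lines a few hunks up; a macro cannot be exported, so modular code previously had to reach for mem_map and pfn_base directly. A hypothetical module-side use that the exported functions enable (names here are illustrative, not from the patch):

	/* Hypothetical module code; relies only on exported symbols. */
	#include <asm/page.h>

	static void *module_map_pfn(unsigned long pfn)
	{
		struct page *page;

		if (!pfn_valid(pfn))
			return NULL;
		page = pfn_to_page(pfn); /* resolved via the exported function */
		return page_address(page);
	}
...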
@@ -70,11 +70,11 @@ static loff_t vcs_lseek(struct file *file, loff_t offset, int orig)
 {
 	int size;
 
-	lock_kernel();
+	down(&con_buf_sem);
 	size = vcs_size(file->f_dentry->d_inode);
 	switch (orig) {
 		default:
-			unlock_kernel();
+			up(&con_buf_sem);
 			return -EINVAL;
 		case 2:
 			offset += size;
@@ -85,28 +85,21 @@ static loff_t vcs_lseek(struct file *file, loff_t offset, int orig)
 			break;
 	}
 	if (offset < 0 || offset > size) {
-		unlock_kernel();
+		up(&con_buf_sem);
 		return -EINVAL;
 	}
 	file->f_pos = offset;
-	unlock_kernel();
+	up(&con_buf_sem);
 	return file->f_pos;
 }
 
-/* We share this temporary buffer with the console write code
- * so that we can easily avoid touching user space while holding the
- * console spinlock.
- */
-extern char con_buf[PAGE_SIZE];
-#define CON_BUF_SIZE PAGE_SIZE
-extern struct semaphore con_buf_sem;
-
 static ssize_t
 vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
 	struct inode *inode = file->f_dentry->d_inode;
 	unsigned int currcons = iminor(inode);
-	long pos = *ppos;
+	long pos;
 	long viewed, attr, read;
 	int col, maxcol;
 	unsigned short *org = NULL;
@@ -114,6 +107,8 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 
 	down(&con_buf_sem);
 
+	pos = *ppos;
+
 	/* Select the proper current console and verify
 	 * sanity of the situation under the console lock.
 	 */
@@ -275,7 +270,7 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 {
 	struct inode *inode = file->f_dentry->d_inode;
 	unsigned int currcons = iminor(inode);
-	long pos = *ppos;
+	long pos;
 	long viewed, attr, size, written;
 	char *con_buf0;
 	int col, maxcol;
@@ -284,6 +279,8 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 
 	down(&con_buf_sem);
 
+	pos = *ppos;
+
 	/* Select the proper current console and verify
 	 * sanity of the situation under the console lock.
 	 */
...
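Editor's note: moving `pos = *ppos` below `down(&con_buf_sem)` matters because lseek now updates `file->f_pos` under that same semaphore. A hedged sketch of the hazard being closed, as a hypothetical interleaving rather than driver code:

	/*
	 *   reader                        seeker (vcs_lseek)
	 *   ------                        ------------------
	 *   pos = *ppos;  <-- stale/torn  down(&con_buf_sem);
	 *   down(&con_buf_sem);           file->f_pos = offset;
	 *   ... uses pos ...              up(&con_buf_sem);
	 *
	 * Sampling *ppos only after down() makes the snapshot consistent
	 * with whatever the last completed lseek wrote.
	 */
...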
@@ -1865,7 +1865,6 @@ static void do_con_trol(struct tty_struct *tty, unsigned int currcons, int c)
  * kernel memory allocation is available.
  */
 char con_buf[PAGE_SIZE];
-#define CON_BUF_SIZE PAGE_SIZE
 DECLARE_MUTEX(con_buf_sem);
 
 /* acquires console_sem */
...
@@ -33,7 +33,7 @@ void radeonfb_fillrect(struct fb_info *info, const struct fb_fillrect *region)
 	if (info->state != FBINFO_STATE_RUNNING)
 		return;
-	if (radeon_accel_disabled()) {
+	if (info->flags & FBINFO_HWACCEL_DISABLED) {
 		cfb_fillrect(info, region);
 		return;
 	}
@@ -99,7 +99,7 @@ void radeonfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
 	if (info->state != FBINFO_STATE_RUNNING)
 		return;
-	if (radeon_accel_disabled()) {
+	if (info->flags & FBINFO_HWACCEL_DISABLED) {
 		cfb_copyarea(info, area);
 		return;
 	}
...
@@ -242,8 +242,6 @@ static int force_measure_pll = 0;
 static int nomtrr = 0;
 #endif
 
-int radeonfb_noaccel = 0;
-
 /*
  * prototypes
  */
@@ -810,9 +808,8 @@ static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *in
 	/* XXX I'm adjusting xres_virtual to the pitch, that may help XFree
 	 * with some panels, though I don't quite like this solution
 	 */
-	if (radeon_accel_disabled()) {
+	if (rinfo->info->flags & FBINFO_HWACCEL_DISABLED) {
 		v.xres_virtual = v.xres_virtual & ~7ul;
-		v.accel_flags = 0;
 	} else {
 		pitch = ((v.xres_virtual * ((v.bits_per_pixel + 1) / 8) + 0x3f)
 			 & ~(0x3f)) >> 6;
@@ -1535,7 +1532,7 @@ int radeonfb_set_par(struct fb_info *info)
 	newmode.crtc_v_sync_strt_wid = (((vSyncStart - 1) & 0xfff) |
 					(vsync_wid << 16) | (v_sync_pol << 23));
 
-	if (!radeon_accel_disabled()) {
+	if (!(info->flags & FBINFO_HWACCEL_DISABLED)) {
 		/* We first calculate the engine pitch */
 		rinfo->pitch = ((mode->xres_virtual * ((mode->bits_per_pixel + 1) / 8) + 0x3f)
 				& ~(0x3f)) >> 6;
@@ -1683,12 +1680,11 @@ int radeonfb_set_par(struct fb_info *info)
 	if (!rinfo->asleep) {
 		radeon_write_mode (rinfo, &newmode);
 		/* (re)initialize the engine */
-		if (!radeon_accel_disabled())
+		if (!(info->flags & FBINFO_HWACCEL_DISABLED))
 			radeonfb_engine_init (rinfo);
 	}
 
 	/* Update fix */
-	if (!radeon_accel_disabled())
+	if (!(info->flags & FBINFO_HWACCEL_DISABLED))
 		info->fix.line_length = rinfo->pitch*64;
 	else
 		info->fix.line_length = mode->xres_virtual
@@ -1793,9 +1789,13 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
 	info->currcon = -1;
 	info->par = rinfo;
 	info->pseudo_palette = rinfo->pseudo_palette;
-	info->flags = FBINFO_FLAG_DEFAULT;
-	info->fbops = &radeonfb_ops;
-	info->screen_base = (char *)rinfo->fb_base;
+	info->flags = FBINFO_DEFAULT
+		    | FBINFO_HWACCEL_COPYAREA
+		    | FBINFO_HWACCEL_FILLRECT
+		    | FBINFO_HWACCEL_XPAN
+		    | FBINFO_HWACCEL_YPAN;
+	info->fbops = &radeonfb_ops;
+	info->screen_base = (char *)rinfo->fb_base;
 
 	/* Fill fix common fields */
 	strlcpy(info->fix.id, rinfo->name, sizeof(info->fix.id));
@@ -1809,17 +1809,11 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
 	info->fix.type_aux = 0;
 	info->fix.mmio_start = rinfo->mmio_base_phys;
 	info->fix.mmio_len = RADEON_REGSIZE;
-	if (radeon_accel_disabled())
-		info->fix.accel = FB_ACCEL_NONE;
-	else
-		info->fix.accel = FB_ACCEL_ATI_RADEON;
 
 	fb_alloc_cmap(&info->cmap, 256, 0);
 
-	if (radeon_accel_disabled())
-		info->var.accel_flags &= ~FB_ACCELF_TEXT;
-	else
-		info->var.accel_flags |= FB_ACCELF_TEXT;
+	if (noaccel)
+		info->flags |= FBINFO_HWACCEL_DISABLED;
 
 	return 0;
 }
@@ -2451,7 +2445,6 @@ static struct pci_driver radeonfb_driver = {
 int __init radeonfb_init (void)
 {
-	radeonfb_noaccel = noaccel;
 	return pci_module_init (&radeonfb_driver);
 }
@@ -2473,7 +2466,7 @@ int __init radeonfb_setup (char *options)
 			continue;
 
 		if (!strncmp(this_opt, "noaccel", 7)) {
-			noaccel = radeonfb_noaccel = 1;
+			noaccel = 1;
 		} else if (!strncmp(this_opt, "mirror", 6)) {
 			mirror = 1;
 		} else if (!strncmp(this_opt, "force_dfp", 9)) {
...
@@ -859,7 +859,7 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, u32 state)
 	fb_set_suspend(info, 1);
 
-	if (!radeon_accel_disabled()) {
+	if (!(info->flags & FBINFO_HWACCEL_DISABLED)) {
 		/* Make sure engine is reset */
 		radeon_engine_idle();
 		radeonfb_engine_reset(rinfo);
...
@@ -516,12 +516,6 @@ static inline void _radeon_engine_idle (struct radeonfb_info *rinfo)
 	printk(KERN_ERR "radeonfb: Idle Timeout !\n");
 }
 
-static inline int radeon_accel_disabled(void)
-{
-	extern int radeonfb_noaccel;
-
-	return radeonfb_noaccel;
-}
-
 #define radeon_engine_idle()		_radeon_engine_idle(rinfo)
 #define radeon_fifo_wait(entries)	_radeon_fifo_wait(rinfo,entries)
...
@@ -316,7 +316,7 @@ static inline int exec_permission_lite(struct inode *inode,
 {
 	umode_t mode = inode->i_mode;
 
-	if ((inode->i_op && inode->i_op->permission))
+	if (inode->i_op && inode->i_op->permission)
 		return -EAGAIN;
 
 	if (current->fsuid == inode->i_uid)
@@ -330,6 +330,9 @@ static inline int exec_permission_lite(struct inode *inode,
 	if ((inode->i_mode & S_IXUGO) && capable(CAP_DAC_OVERRIDE))
 		goto ok;
 
+	if (S_ISDIR(inode->i_mode) && capable(CAP_DAC_OVERRIDE))
+		goto ok;
+
 	if (S_ISDIR(inode->i_mode) && capable(CAP_DAC_READ_SEARCH))
 		goto ok;
...
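Editor's note: the added test fills a gap between its two neighbours. CAP_DAC_OVERRIDE previously helped only when some execute bit was set, so a mode-0 directory defeated it even though the narrower CAP_DAC_READ_SEARCH was honoured unconditionally on directories. Summarised from only the lines visible in the hunk:

	/* Traversal checks in this hunk, in order:
	 *   any x bit + CAP_DAC_OVERRIDE        -> ok  (S_IXUGO test)
	 *   directory + CAP_DAC_OVERRIDE        -> ok  (the added case: a
	 *        permissions-zero directory no longer stops the lookup)
	 *   directory + CAP_DAC_READ_SEARCH     -> ok
	 */
...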
@@ -521,6 +521,7 @@ int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
 	server->super_block = sb;
 	server->mnt = NULL;
 	server->sock_file = NULL;
+	init_waitqueue_head(&server->conn_wq);
 	init_MUTEX(&server->sem);
 	INIT_LIST_HEAD(&server->entry);
 	INIT_LIST_HEAD(&server->xmitq);
...
@@ -56,6 +56,7 @@ static struct smb_ops smb_ops_os2;
 static struct smb_ops smb_ops_win95;
 static struct smb_ops smb_ops_winNT;
 static struct smb_ops smb_ops_unix;
+static struct smb_ops smb_ops_null;
 
 static void
 smb_init_dirent(struct smb_sb_info *server, struct smb_fattr *fattr);
@@ -981,6 +982,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
 	smbiod_wake_up();
 	if (server->opt.capabilities & SMB_CAP_UNIX)
 		smb_proc_query_cifsunix(server);
+
+	server->conn_complete++;
+	wake_up_interruptible_all(&server->conn_wq);
 	return error;
 
 out:
@@ -2793,11 +2797,46 @@ smb_proc_getattr_95(struct smb_sb_info *server, struct dentry *dir,
 	return result;
 }
 
+static int
+smb_proc_ops_wait(struct smb_sb_info *server)
+{
+	int result;
+
+	result = wait_event_interruptible_timeout(server->conn_wq,
+				server->conn_complete, 30*HZ);
+
+	if (!result || signal_pending(current))
+		return -EIO;
+
+	return 0;
+}
+
 static int
 smb_proc_getattr_null(struct smb_sb_info *server, struct dentry *dir,
-		      struct smb_fattr *attr)
+		      struct smb_fattr *fattr)
 {
-	return -EIO;
+	int result;
+
+	if (smb_proc_ops_wait(server) < 0)
+		return -EIO;
+
+	smb_init_dirent(server, fattr);
+	result = server->ops->getattr(server, dir, fattr);
+	smb_finish_dirent(server, fattr);
+
+	return result;
+}
+
+static int
+smb_proc_readdir_null(struct file *filp, void *dirent, filldir_t filldir,
+		      struct smb_cache_control *ctl)
+{
+	struct smb_sb_info *server = server_from_dentry(filp->f_dentry);
+
+	if (smb_proc_ops_wait(server) < 0)
+		return -EIO;
+
+	return server->ops->readdir(filp, dirent, filldir, ctl);
 }
 
 int
@@ -3431,6 +3470,7 @@ static struct smb_ops smb_ops_unix =
 /* Place holder until real ops are in place */
 static struct smb_ops smb_ops_null =
 {
+	.readdir = smb_proc_readdir_null,
 	.getattr = smb_proc_getattr_null,
 };
...
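Editor's note: the placeholder ops hinge on the return convention of wait_event_interruptible_timeout(): 0 means the 30*HZ timeout expired with the condition still false, a positive value means the condition became true; the signal case is checked explicitly here via signal_pending(). A hedged sketch of the same wait/wake pairing in isolation, with hypothetical names (`ready`, `ready_wq`):

	static DECLARE_WAIT_QUEUE_HEAD(ready_wq);
	static int ready;

	/* Waiter side (the placeholder op): */
	static int wait_for_ready(void)
	{
		long left = wait_event_interruptible_timeout(ready_wq,
							     ready, 30*HZ);

		if (!left || signal_pending(current))	/* timeout or signal */
			return -EIO;
		return 0;			/* condition became true */
	}

	/* Waker side, mirroring smb_newconn() above: */
	static void mark_ready(void)
	{
		ready = 1;			/* conn_complete++ in the real code */
		wake_up_interruptible_all(&ready_wq);
	}
...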
-#ifndef _H8300_INIT_H
-#define _H8300_INIT_H
-
-#define __init		__attribute__ ((__section__ (".text.init")))
-#define __initdata	__attribute__ ((__section__ (".data.init")))
-
-/* For assembly routines */
-#define __INIT		.section	".text.init",#alloc,#execinstr
-#define __FINIT		.previous
-#define __INITDATA	.section	".data.init",#alloc,#write
-
-#endif
+#error "<asm/init.h> should never be used - use <linux/init.h> instead"
...
-#ifndef _M68K_INIT_H
-#define _M68K_INIT_H
-
-#define __init		__attribute__ ((__section__ (".text.init")))
-#define __initdata	__attribute__ ((__section__ (".data.init")))
-
-/* For assembly routines */
-#define __INIT		.section	".text.init",#alloc,#execinstr
-#define __FINIT		.previous
-#define __INITDATA	.section	".data.init",#alloc,#write
-
-#endif
+#error "<asm/init.h> should never be used - use <linux/init.h> instead"
...
-#include <asm-m68k/init.h>
+#error "<asm/init.h> should never be used - use <linux/init.h> instead"
...
@@ -5,6 +5,8 @@
 #include <linux/config.h>
 #include <linux/init.h>
 
+#include <asm/setup.h>
+
 #ifdef CONFIG_APUS
 #include <asm-m68k/machdep.h>
 #endif
...
-/*
- * include/asm-s390/init.h
- *
- *  S390 version
- */
+#error "<asm/init.h> should never be used - use <linux/init.h> instead"
...
+#error "<asm/init.h> should never be used - use <linux/init.h> instead"
...
+#error "<asm/init.h> should never be used - use <linux/init.h> instead"
...
+#error "<asm/init.h> should never be used - use <linux/init.h> instead"
...
+#error "<asm/init.h> should never be used - use <linux/init.h> instead"
...
@@ -19,8 +19,8 @@ extern unsigned long bus_to_virt_not_defined_use_pci_map(volatile void *addr);
 #define bus_to_virt bus_to_virt_not_defined_use_pci_map
 
 /* BIO layer definitions. */
-extern unsigned long phys_base, kern_base, kern_size;
-#define page_to_phys(page)	((((page) - mem_map) << PAGE_SHIFT)+phys_base)
+extern unsigned long kern_base, kern_size;
+#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 #define BIO_VMERGE_BOUNDARY	8192
 
 /* Different PCI controllers we support have their PCI MEM space
...
@@ -14,9 +14,6 @@
 
 #ifndef __ASSEMBLY__
 
-/* Sparc64 is slow at multiplication, we prefer to use some extra space. */
-#define WANT_PAGE_VIRTUAL 1
-
 extern void _clear_page(void *page);
 #define clear_page(X)	_clear_page((void *)(X))
 struct page;
@@ -111,17 +108,19 @@ typedef unsigned long iopgprot_t;
  */
 #define PAGE_OFFSET		_AC(0xFFFFF80000000000,UL)
 
+#ifndef __ASSEMBLY__
+
 #define __pa(x)			((unsigned long)(x) - PAGE_OFFSET)
 #define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
 
-/* PFNs are real physical page numbers. However, mem_map only begins to record
- * per-page information starting at pfn_base. This is to handle systems where
- * the first physical page in the machine is at some huge physical address, such
- * as 4GB. This is common on a partitioned E10000, for example.
+/* PFNs are real physical page numbers.  However, mem_map only begins to record
+ * per-page information starting at pfn_base.  This is to handle systems where
+ * the first physical page in the machine is at some huge physical address,
+ * such as 4GB.  This is common on a partitioned E10000, for example.
  */
-#define pfn_to_page(pfn)	(mem_map + ((pfn)-(pfn_base)))
-#define page_to_pfn(page)	((unsigned long)(((page) - mem_map) + pfn_base))
+extern struct page *pfn_to_page(unsigned long pfn);
+extern unsigned long page_to_pfn(struct page *);
+
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)
 #define pfn_valid(pfn)		(((pfn)-(pfn_base)) < max_mapnr)
@@ -130,8 +129,6 @@ typedef unsigned long iopgprot_t;
 #define virt_to_phys __pa
 #define phys_to_virt __va
 
-#ifndef __ASSEMBLY__
-
 /* The following structure is used to hold the physical
  * memory configuration of the machine.  This is filled in
  * probe_memory() and is later used by mem_init() to set up
...
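Editor's note: the pfn_base indirection is easiest to see with numbers. An illustrative arithmetic sketch, assuming (hypothetically) RAM starting at 4 GB and sparc64's 8 KB pages (PAGE_SHIFT == 13); the values are not from the patch:

	/*
	 *   phys_base = 0x100000000           first physical byte (4 GB)
	 *   pfn_base  = phys_base >> 13      = 0x80000   first real PFN
	 *
	 * mem_map[0] then describes PFN 0x80000, not PFN 0, so:
	 *
	 *   page_to_pfn(p) = (p - mem_map) + pfn_base
	 *   pfn_to_page(n) = mem_map + (n - pfn_base)
	 *
	 * Without the pfn_base bias, mem_map would need ~4 GB worth of
	 * unused struct page entries in front of it.
	 */
...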
-#ifndef _UM_INIT_H
-#define _UM_INIT_H
-
-#ifdef notdef
-#define __init
-#define __initdata
-#define __initfunc(__arginit) __arginit
-#define __cacheline_aligned
-#endif
-
-#endif
+#error "<asm/init.h> should never be used - use <linux/init.h> instead"
...
@@ -81,7 +81,34 @@ struct per_cpu_pageset {
 #define MAX_NR_ZONES		3	/* Sync this with ZONES_SHIFT */
 #define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */
 
+/*
+ * When a memory allocation must conform to specific limitations (such
+ * as being suitable for DMA) the caller will pass in hints to the
+ * allocator in the gfp_mask, in the zone modifier bits.  These bits
+ * are used to select a priority ordered list of memory zones which
+ * match the requested limits.  GFP_ZONEMASK defines which bits within
+ * the gfp_mask should be considered as zone modifiers.  Each valid
+ * combination of the zone modifier bits has a corresponding list
+ * of zones (in node_zonelists).  Thus for two zone modifiers there
+ * will be a maximum of 4 (2 ** 2) zonelists, for 3 modifiers there will
+ * be 8 (2 ** 3) zonelists.  GFP_ZONETYPES defines the number of possible
+ * combinations of zone modifiers in "zone modifier space".
+ */
 #define GFP_ZONEMASK	0x03
+/*
+ * As an optimisation any zone modifier bits which are only valid when
+ * no other zone modifier bits are set (loners) should be placed in
+ * the highest order bits of this field.  This allows us to reduce the
+ * extent of the zonelists thus saving space.  For example in the case
+ * of three zone modifier bits, we could require up to eight zonelists.
+ * If the left most zone modifier is a "loner" then the highest valid
+ * zonelist would be four allowing us to allocate only five zonelists.
+ * Use the first form when the left most bit is not a "loner", otherwise
+ * use the second.
+ */
+/* #define GFP_ZONETYPES	(GFP_ZONEMASK + 1) */		/* Non-loner */
+#define GFP_ZONETYPES	((GFP_ZONEMASK + 1) / 2 + 1)		/* Loner */
 
 /*
  * On machines where it is needed (eg PCs) we divide physical memory
@@ -237,7 +264,7 @@ struct zonelist {
 struct bootmem_data;
 typedef struct pglist_data {
 	struct zone node_zones[MAX_NR_ZONES];
-	struct zonelist node_zonelists[MAX_NR_ZONES];
+	struct zonelist node_zonelists[GFP_ZONETYPES];
 	int nr_zones;
 	struct page *node_mem_map;
 	struct bootmem_data *bdata;
...
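Editor's note: the loner formula is worth checking with concrete numbers, worked here from the defines above:

	/*
	 *   GFP_ZONEMASK = 0x03  ->  two zone modifier bits
	 *   non-loner: GFP_ZONEMASK + 1         = 4 zonelists (0x0..0x3)
	 *   loner:     (GFP_ZONEMASK + 1)/2 + 1 = 3 zonelists (0x0, 0x1,
	 *              0x2; 0x3 would combine the loner bit with another
	 *              modifier and is never generated)
	 *
	 *   Three modifier bits with the top bit a loner:
	 *   non-loner: 8 zonelists     loner: (7 + 1)/2 + 1 = 5,
	 *   matching the "five zonelists" in the comment.
	 */
...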
@@ -57,7 +57,8 @@ struct smb_sb_info {
 	unsigned int generation;
 	pid_t conn_pid;
 	struct smb_conn_opt opt;
+	wait_queue_head_t conn_wq;
+	int conn_complete;
 
 	struct semaphore sem;
 	unsigned char header[SMB_HEADER_LEN + 20*2 + 2];
...
@@ -11,6 +11,7 @@
 #include <linux/kd.h>
 #include <linux/tty.h>
 #include <linux/console_struct.h>
+#include <linux/mm.h>
 
 /*
  * Presently, a lot of graphics programs do not restore the contents of
@@ -84,4 +85,12 @@ int vt_waitactive(int vt);
 void change_console(unsigned int);
 void reset_vc(unsigned int new_console);
 
+/*
+ * vc_screen.c shares this temporary buffer with the console write code so that
+ * we can easily avoid touching user space while holding the console spinlock.
+ */
+extern char con_buf[PAGE_SIZE];
+#define CON_BUF_SIZE PAGE_SIZE
+extern struct semaphore con_buf_sem;
+
 #endif /* _VT_KERN_H */
...
@@ -32,10 +32,7 @@
 #include <linux/in_route.h>
 #include <net/route.h>
 #include <net/arp.h>
-#ifndef _SNMP_H
 #include <net/snmp.h>
-#endif
 
 struct sock;
...
@@ -1235,7 +1235,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
 	DECLARE_BITMAP(used_mask, MAX_NUMNODES);
 
 	/* initialize zonelists */
-	for (i = 0; i < MAX_NR_ZONES; i++) {
+	for (i = 0; i < GFP_ZONETYPES; i++) {
 		zonelist = pgdat->node_zonelists + i;
 		memset(zonelist, 0, sizeof(*zonelist));
 		zonelist->zones[0] = NULL;
@@ -1257,7 +1257,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
 		node_load[node] += load;
 		prev_node = node;
 		load--;
-		for (i = 0; i < MAX_NR_ZONES; i++) {
+		for (i = 0; i < GFP_ZONETYPES; i++) {
 			zonelist = pgdat->node_zonelists + i;
 			for (j = 0; zonelist->zones[j] != NULL; j++);
@@ -1280,7 +1280,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
 	int i, j, k, node, local_node;
 
 	local_node = pgdat->node_id;
-	for (i = 0; i < MAX_NR_ZONES; i++) {
+	for (i = 0; i < GFP_ZONETYPES; i++) {
 		struct zonelist *zonelist;
 
 		zonelist = pgdat->node_zonelists + i;
@@ -1840,7 +1840,7 @@ static void setup_per_zone_protection(void)
 	 * For each of the different allocation types:
 	 * GFP_DMA -> GFP_KERNEL -> GFP_HIGHMEM
 	 */
-	for (i = 0; i < MAX_NR_ZONES; i++) {
+	for (i = 0; i < GFP_ZONETYPES; i++) {
 		/*
 		 * For each of the zones:
 		 * ZONE_HIGHMEM -> ZONE_NORMAL -> ZONE_DMA
...
@@ -558,7 +558,7 @@ Note: we do not have explicit unassign, but look at _push()
 	atmvcc->push = br2684_push;
 	skb_queue_head_init(&copy);
 	skb_migrate(&atmvcc->sk->sk_receive_queue, &copy);
-	while ((skb = skb_dequeue(&copy))) {
+	while ((skb = skb_dequeue(&copy)) != NULL) {
 		BRPRIV(skb->dev)->stats.rx_bytes -= skb->len;
 		BRPRIV(skb->dev)->stats.rx_packets--;
 		br2684_push(atmvcc, skb);
...
@@ -503,7 +503,7 @@ static int clip_mkip(struct atm_vcc *vcc,int timeout)
 	skb_queue_head_init(&copy);
 	skb_migrate(&vcc->sk->sk_receive_queue, &copy);
 	/* re-process everything received between connection setup and MKIP */
-	while ((skb = skb_dequeue(&copy)))
+	while ((skb = skb_dequeue(&copy)) != NULL)
 		if (!clip_devs) {
 			atm_return(vcc,skb->truesize);
 			kfree_skb(skb);
...
@@ -187,7 +187,7 @@ static void vcc_destroy_socket(struct sock *sk)
 		vcc_remove_socket(sk);	/* no more receive */
 
-		while ((skb = skb_dequeue(&vcc->sk->sk_receive_queue))) {
+		while ((skb = skb_dequeue(&vcc->sk->sk_receive_queue)) != NULL) {
 			atm_return(vcc,skb->truesize);
 			kfree_skb(skb);
 		}
...
@@ -567,7 +567,7 @@ lec_atm_close(struct atm_vcc *vcc)
 	if (skb_peek(&vcc->sk->sk_receive_queue))
 		printk("%s lec_atm_close: closing with messages pending\n",
 		       dev->name);
-	while ((skb = skb_dequeue(&vcc->sk->sk_receive_queue))) {
+	while ((skb = skb_dequeue(&vcc->sk->sk_receive_queue)) != NULL) {
 		atm_return(vcc, skb->truesize);
 		dev_kfree_skb(skb);
 	}
@@ -1940,7 +1940,7 @@ lec_arp_check_expire(unsigned long data)
 				    priv->path_switching_delay)) {
 					struct sk_buff *skb;
 
-					while ((skb = skb_dequeue(&entry->tx_wait)))
+					while ((skb = skb_dequeue(&entry->tx_wait)) != NULL)
 						lec_send(entry->vcc, skb, entry->priv);
 					entry->last_used = jiffies;
 					entry->status =
@@ -2337,7 +2337,7 @@ lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
 		    entry->status == ESI_FLUSH_PENDING) {
 			struct sk_buff *skb;
 
-			while ((skb = skb_dequeue(&entry->tx_wait)))
+			while ((skb = skb_dequeue(&entry->tx_wait)) != NULL)
 				lec_send(entry->vcc, skb, entry->priv);
 			entry->status = ESI_FORWARD_DIRECT;
 			DPRINTK("LEC_ARP: Flushed\n");
...
@@ -66,7 +66,7 @@ static void svc_disconnect(struct atm_vcc *vcc)
 	}
 	/* beware - socket is still in use by atmsigd until the last
 	   as_indicate has been answered */
-	while ((skb = skb_dequeue(&vcc->sk->sk_receive_queue))) {
+	while ((skb = skb_dequeue(&vcc->sk->sk_receive_queue)) != NULL) {
 		DPRINTK("LISTEN REL\n");
 		sigd_enq2(NULL,as_reject,vcc,NULL,NULL,&vcc->qos,0);
 		dev_kfree_skb(skb);
...
@@ -523,7 +523,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 			v.tm.tv_usec = 0;
 		} else {
 			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
-			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000) / HZ;
+			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
 		}
 		break;
@@ -534,7 +534,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 			v.tm.tv_usec = 0;
 		} else {
 			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
-			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000) / HZ;
+			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
 		}
 		break;
...
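Editor's note: the constant change is the whole fix; tv_usec is microseconds, and converting jiffies needs a factor of 1000000/HZ, not 1000/HZ. A worked check, assuming HZ == 1000:

	long timeo = 2500;			/* 2.5 s at HZ == 1000 */
	long sec   = timeo / HZ;		/* 2 */
	long usec_old = ((timeo % HZ) * 1000) / HZ;
						/* 500 -> 0.0005 s, wrong */
	long usec_new = ((timeo % HZ) * 1000000) / HZ;
						/* 500000 -> 0.5 s, right */
...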