Commit 05177e12 authored by Linus Torvalds

Merge bk://linux-scsi.bkbits.net/scsi-for-linus-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 5c64e94a 7bcfc318
@@ -40,6 +40,7 @@ int nr_cpus_in_node[MAX_NUMNODES] = { [0 ... (MAX_NUMNODES -1)] = 0};
 struct pglist_data *node_data[MAX_NUMNODES];
 bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
+static unsigned long node0_io_hole_size;
 static int min_common_depth;

 /*
@@ -48,8 +49,7 @@ static int min_common_depth;
  */
 static struct {
 	unsigned long node_start_pfn;
-	unsigned long node_end_pfn;
-	unsigned long node_present_pages;
+	unsigned long node_spanned_pages;
 } init_node_data[MAX_NUMNODES] __initdata;

 EXPORT_SYMBOL(node_data);
@@ -186,31 +186,14 @@ static int __init find_min_common_depth(void)
 	return depth;
 }

-static int __init get_mem_addr_cells(void)
-{
-	struct device_node *memory = NULL;
-
-	memory = of_find_node_by_type(memory, "memory");
-	if (!memory)
-		return 0; /* it won't matter */
-
-	return(prom_n_addr_cells(memory));
-}
-
-static int __init get_mem_size_cells(void)
-{
-	struct device_node *memory = NULL;
-
-	memory = of_find_node_by_type(memory, "memory");
-	if (!memory)
-		return 0; /* it won't matter */
-	return(prom_n_size_cells(memory));
-}
-
-static unsigned long read_n_cells(int n, unsigned int **buf)
+static unsigned long read_cell_ul(struct device_node *device, unsigned int **buf)
 {
+	int i;
 	unsigned long result = 0;

-	while (n--) {
+	i = prom_n_size_cells(device);
+	/* bug on i>2 ?? */
+	while (i--) {
 		result = (result << 32) | **buf;
 		(*buf)++;
 	}
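Editorial note: the cell-combining loop above folds each 32-bit OF "reg" cell into a growing 64-bit value (shift the accumulator left, OR the cell in), so a memory node with #address-cells = 2 yields a full 64-bit address. A minimal standalone sketch of the same arithmetic, with invented cell values and no kernel dependencies:

```c
/* Standalone sketch of the cell-combining arithmetic in read_cell_ul().
 * The "reg" cell values below are invented for illustration. */
#include <stdio.h>
#include <stdint.h>

static uint64_t read_cells(int n, const uint32_t **buf)
{
	uint64_t result = 0;

	while (n--) {
		result = (result << 32) | **buf;	/* fold in one 32-bit cell */
		(*buf)++;				/* advance the property cursor */
	}
	return result;
}

int main(void)
{
	/* A hypothetical #address-cells = 2 entry: { 0x00000001, 0x80000000 } */
	const uint32_t reg[] = { 0x00000001, 0x80000000 };
	const uint32_t *p = reg;

	/* Prints: start = 0x180000000 */
	printf("start = 0x%llx\n", (unsigned long long)read_cells(2, &p));
	return 0;
}
```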
@@ -284,7 +267,6 @@ static int __init parse_numa_properties(void)
 {
 	struct device_node *cpu = NULL;
 	struct device_node *memory = NULL;
-	int addr_cells, size_cells;
 	int max_domain = 0;
 	long entries = lmb_end_of_DRAM() >> MEMORY_INCREMENT_SHIFT;
 	unsigned long i;
@@ -331,8 +313,6 @@ static int __init parse_numa_properties(void)
 		}
 	}

-	addr_cells = get_mem_addr_cells();
-	size_cells = get_mem_size_cells();
 	memory = NULL;
 	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
 		unsigned long start;
@@ -349,8 +329,8 @@ static int __init parse_numa_properties(void)
 		ranges = memory->n_addrs;
 new_range:
 		/* these are order-sensitive, and modify the buffer pointer */
-		start = read_n_cells(addr_cells, &memcell_buf);
-		size = read_n_cells(size_cells, &memcell_buf);
+		start = read_cell_ul(memory, &memcell_buf);
+		size = read_cell_ul(memory, &memcell_buf);

 		start = _ALIGN_DOWN(start, MEMORY_INCREMENT);
 		size = _ALIGN_UP(size, MEMORY_INCREMENT);
@@ -369,27 +349,32 @@ static int __init parse_numa_properties(void)
 			max_domain = numa_domain;

 		/*
-		 * Initialize new node struct, or add to an existing one.
+		 * For backwards compatibility, OF splits the first node
+		 * into two regions (the first being 0-4GB). Check for
+		 * this simple case and complain if there is a gap in
+		 * memory
 		 */
-		if (init_node_data[numa_domain].node_end_pfn) {
-			if ((start / PAGE_SIZE) <
-			    init_node_data[numa_domain].node_start_pfn)
-				init_node_data[numa_domain].node_start_pfn =
-					start / PAGE_SIZE;
-			else
-				init_node_data[numa_domain].node_end_pfn =
-					(start / PAGE_SIZE) +
-					(size / PAGE_SIZE);
-
-			init_node_data[numa_domain].node_present_pages +=
+		if (init_node_data[numa_domain].node_spanned_pages) {
+			unsigned long shouldstart =
+				init_node_data[numa_domain].node_start_pfn +
+				init_node_data[numa_domain].node_spanned_pages;
+			if (shouldstart != (start / PAGE_SIZE)) {
+				/* Revert to non-numa for now */
+				printk(KERN_ERR
+				       "WARNING: Unexpected node layout: "
+				       "region start %lx length %lx\n",
+				       start, size);
+				printk(KERN_ERR "NUMA is disabled\n");
+				goto err;
+			}
+			init_node_data[numa_domain].node_spanned_pages +=
 				size / PAGE_SIZE;
 		} else {
 			node_set_online(numa_domain);
 			init_node_data[numa_domain].node_start_pfn =
 				start / PAGE_SIZE;
-			init_node_data[numa_domain].node_end_pfn =
-				init_node_data[numa_domain].node_start_pfn +
+			init_node_data[numa_domain].node_spanned_pages =
 				size / PAGE_SIZE;
 		}
@@ -406,6 +391,14 @@ static int __init parse_numa_properties(void)
 			node_set_online(i);

 	return 0;
+err:
+	/* Something has gone wrong; revert any setup we've done */
+	for_each_node(i) {
+		node_set_offline(i);
+		init_node_data[i].node_start_pfn = 0;
+		init_node_data[i].node_spanned_pages = 0;
+	}
+	return -1;
 }
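Editorial note: the layout check above reduces to one invariant: a region may extend a node only if it starts exactly at node_start_pfn + node_spanned_pages. A standalone sketch of that invariant (assumes 64-bit unsigned long and 4 KB pages; region values invented):

```c
/* Sketch of the contiguity invariant used in parse_numa_properties():
 * a new region must begin where the node currently ends, otherwise the
 * caller gives up on NUMA. Values are invented for illustration. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct node_range {
	unsigned long node_start_pfn;
	unsigned long node_spanned_pages;
};

static int extend_node(struct node_range *n, unsigned long start,
		       unsigned long size)
{
	unsigned long shouldstart = n->node_start_pfn + n->node_spanned_pages;

	if (n->node_spanned_pages && shouldstart != start / PAGE_SIZE)
		return -1;	/* gap: caller reverts to non-NUMA */
	if (!n->node_spanned_pages)
		n->node_start_pfn = start / PAGE_SIZE;
	n->node_spanned_pages += size / PAGE_SIZE;
	return 0;
}

int main(void)
{
	struct node_range n = { 0, 0 };

	/* OF splits the first node at 4GB: two back-to-back regions pass... */
	printf("%d\n", extend_node(&n, 0x0UL, 0x100000000UL));	/* 0 */
	printf("%d\n", extend_node(&n, 0x100000000UL, 0x40000000UL));	/* 0 */
	/* ...but a region leaving a hole fails. */
	printf("%d\n", extend_node(&n, 0x200000000UL, 0x40000000UL));	/* -1 */
	return 0;
}
```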
 static void __init setup_nonnuma(void)
@@ -433,11 +426,12 @@ static void __init setup_nonnuma(void)
 	node_set_online(0);
 	init_node_data[0].node_start_pfn = 0;
-	init_node_data[0].node_end_pfn = lmb_end_of_DRAM() / PAGE_SIZE;
-	init_node_data[0].node_present_pages = total_ram / PAGE_SIZE;
+	init_node_data[0].node_spanned_pages = lmb_end_of_DRAM() / PAGE_SIZE;

 	for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT)
 		numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;
+
+	node0_io_hole_size = top_of_ram - total_ram;
 }
 static void __init dump_numa_topology(void)
@@ -518,8 +512,6 @@ static unsigned long careful_allocation(int nid, unsigned long size,
 void __init do_init_bootmem(void)
 {
 	int nid;
-	int addr_cells, size_cells;
-	struct device_node *memory = NULL;
 	static struct notifier_block ppc64_numa_nb = {
 		.notifier_call = cpu_numa_callback,
 		.priority = 1 /* Must run before sched domains notifier. */
@@ -543,7 +535,7 @@ void __init do_init_bootmem(void)
 		unsigned long bootmap_pages;

 		start_paddr = init_node_data[nid].node_start_pfn * PAGE_SIZE;
-		end_paddr = init_node_data[nid].node_end_pfn * PAGE_SIZE;
+		end_paddr = start_paddr + (init_node_data[nid].node_spanned_pages * PAGE_SIZE);

 		/* Allocate the node structure node local if possible */
 		NODE_DATA(nid) = (struct pglist_data *)careful_allocation(nid,
@@ -559,9 +551,9 @@ void __init do_init_bootmem(void)
 		NODE_DATA(nid)->node_start_pfn =
 			init_node_data[nid].node_start_pfn;
 		NODE_DATA(nid)->node_spanned_pages =
-			end_paddr - start_paddr;
+			init_node_data[nid].node_spanned_pages;

-		if (NODE_DATA(nid)->node_spanned_pages == 0)
+		if (init_node_data[nid].node_spanned_pages == 0)
 			continue;

 		dbg("start_paddr = %lx\n", start_paddr);
-		/*
-		 * We need to do another scan of all memory sections to
-		 * associate memory with the correct node.
-		 */
-		addr_cells = get_mem_addr_cells();
-		size_cells = get_mem_size_cells();
-		memory = NULL;
-		while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
-			unsigned long mem_start, mem_size;
-			int numa_domain;
-			unsigned int *memcell_buf;
-			unsigned int len;
-
-			memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
-			if (!memcell_buf || len <= 0)
-				continue;
-
-			mem_start = read_n_cells(addr_cells, &memcell_buf);
-			mem_size = read_n_cells(size_cells, &memcell_buf);
-			numa_domain = of_node_numa_domain(memory);
-
-			if (numa_domain != nid)
-				continue;
-
-			if (mem_start < end_paddr &&
-			    (mem_start+mem_size) > start_paddr) {
-				/* should be no overlaps ! */
-				dbg("free_bootmem %lx %lx\n", mem_start, mem_size);
-				free_bootmem_node(NODE_DATA(nid), mem_start,
-					mem_size);
+		for (i = 0; i < lmb.memory.cnt; i++) {
+			unsigned long physbase, size;
+
+			physbase = lmb.memory.region[i].physbase;
+			size = lmb.memory.region[i].size;
+
+			if (physbase < end_paddr &&
+			    (physbase+size) > start_paddr) {
+				/* overlaps */
+				if (physbase < start_paddr) {
+					size -= start_paddr - physbase;
+					physbase = start_paddr;
+				}
+
+				if (size > end_paddr - physbase)
+					size = end_paddr - physbase;
+
+				dbg("free_bootmem %lx %lx\n", physbase, size);
+				free_bootmem_node(NODE_DATA(nid), physbase,
+						  size);
 			}
 		}

-		/*
-		 * Mark reserved regions on this node
-		 */
 		for (i = 0; i < lmb.reserved.cnt; i++) {
 			unsigned long physbase = lmb.reserved.region[i].physbase;
 			unsigned long size = lmb.reserved.region[i].size;

-			if (pa_to_nid(physbase) != nid &&
-			    pa_to_nid(physbase+size-1) != nid)
-				continue;
-
 			if (physbase < end_paddr &&
 			    (physbase+size) > start_paddr) {
 				/* overlaps */
@@ -657,12 +632,13 @@ void __init paging_init(void)
 		unsigned long start_pfn;
 		unsigned long end_pfn;

-		start_pfn = init_node_data[nid].node_start_pfn;
-		end_pfn = init_node_data[nid].node_end_pfn;
+		start_pfn = plat_node_bdata[nid].node_boot_start >> PAGE_SHIFT;
+		end_pfn = plat_node_bdata[nid].node_low_pfn;

 		zones_size[ZONE_DMA] = end_pfn - start_pfn;
-		zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
-			init_node_data[nid].node_present_pages;
+		zholes_size[ZONE_DMA] = 0;
+		if (nid == 0)
+			zholes_size[ZONE_DMA] = node0_io_hole_size >> PAGE_SHIFT;

 		dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid,
 			zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]);
......
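Editorial note: the hole bookkeeping in setup_nonnuma()/paging_init() above is plain arithmetic: node 0 spans everything up to top_of_ram, but only total_ram bytes are backed by memory, and the difference is reported to the allocator as the ZONE_DMA hole. A toy calculation (sizes invented, 4 KB pages assumed):

```c
/* Toy illustration of the IO-hole accounting above. Sizes are invented;
 * assumes 4 KB pages and 64-bit unsigned long. */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long top_of_ram = 0x120000000UL;	/* highest address + 1 */
	unsigned long total_ram  = 0x100000000UL;	/* bytes actually present */

	unsigned long spanned = top_of_ram / page_size;
	unsigned long hole = (top_of_ram - total_ram) / page_size;

	printf("node 0: %lu pages spanned, %lu pages of IO hole\n",
	       spanned, hole);
	return 0;
}
```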
@@ -78,7 +78,7 @@ config AGP_AMD64
 config AGP_INTEL
 	tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
-	depends on AGP && X86 && !X86_64
+	depends on AGP && X86
 	help
 	  This option gives you AGP support for the GLX component of XFree86 4.x
 	  on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
@@ -162,3 +162,10 @@ config AGP_EFFICEON
 	  You should say Y here if you use XFree86 3.3.6 or 4.x and want to
 	  use GLX or DRI.  If unsure, say Y.
+
+config AGP_SGI_TIOCA
+	tristate "SGI TIO chipset AGP support"
+	depends on AGP && (IA64_SGI_SN2 || IA64_GENERIC)
+	help
+	  This option gives you AGP GART support for the SGI TIO chipset
+	  for IA64 processors.
@@ -11,8 +11,8 @@ obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
 obj-$(CONFIG_AGP_I460)		+= i460-agp.o
 obj-$(CONFIG_AGP_INTEL)		+= intel-agp.o
 obj-$(CONFIG_AGP_NVIDIA)	+= nvidia-agp.o
+obj-$(CONFIG_AGP_SGI_TIOCA)	+= sgi-agp.o
 obj-$(CONFIG_AGP_SIS)		+= sis-agp.o
 obj-$(CONFIG_AGP_SWORKS)	+= sworks-agp.o
 obj-$(CONFIG_AGP_UNINORTH)	+= uninorth-agp.o
 obj-$(CONFIG_AGP_VIA)		+= via-agp.o
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
 */

/*
 * SGI TIOCA AGPGART routines.
 *
 */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <asm/sn/addrs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioca_provider.h>
#include "agp.h"
extern int agp_memory_reserved;
extern uint32_t tioca_gart_found;
extern struct list_head tioca_list;
static struct agp_bridge_data **sgi_tioca_agp_bridges;
/*
 * The aperture size and related information is set up at TIOCA init time.
 * Values for this table will be extracted and filled in at
 * sgi_tioca_fetch_size() time.
 */
static struct aper_size_info_fixed sgi_tioca_sizes[] = {
	{0, 0, 0},
};
static void *sgi_tioca_alloc_page(struct agp_bridge_data *bridge)
{
	struct page *page;
	int nid;
	struct tioca_kernel *info =
	    (struct tioca_kernel *)bridge->dev_private_data;

	nid = info->ca_closest_node;
	page = alloc_pages_node(nid, GFP_KERNEL, 0);
	if (page == NULL) {
		return 0;
	}

	get_page(page);
	SetPageLocked(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page_address(page);
}
/*
 * Flush GART tlb's. Cannot selectively flush based on memory so the mem
 * arg is ignored.
 */
static void sgi_tioca_tlbflush(struct agp_memory *mem)
{
	tioca_tlbflush(mem->bridge->dev_private_data);
}

/*
 * Given an address of a host physical page, turn it into a valid gart
 * entry.
 */
static unsigned long
sgi_tioca_mask_memory(struct agp_bridge_data *bridge,
		      unsigned long addr, int type)
{
	return tioca_physpage_to_gart(addr);
}

static void sgi_tioca_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	tioca_fastwrite_enable(bridge->dev_private_data);
}
/*
 * sgi_tioca_configure() doesn't have anything to do since the base CA driver
 * has already set up the GART.
 */
static int sgi_tioca_configure(void)
{
	return 0;
}

/*
 * Determine gfx aperture size. This has already been determined by the
 * CA driver init, so just need to set agp_bridge values accordingly.
 */
static int sgi_tioca_fetch_size(void)
{
	struct tioca_kernel *info =
	    (struct tioca_kernel *)agp_bridge->dev_private_data;

	sgi_tioca_sizes[0].size = info->ca_gfxap_size / MB(1);
	sgi_tioca_sizes[0].num_entries = info->ca_gfxgart_entries;

	return sgi_tioca_sizes[0].size;
}

static int sgi_tioca_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct tioca_kernel *info =
	    (struct tioca_kernel *)bridge->dev_private_data;

	bridge->gatt_table_real = (u32 *) info->ca_gfxgart;
	bridge->gatt_table = bridge->gatt_table_real;
	bridge->gatt_bus_addr = info->ca_gfxgart_base;

	return 0;
}

static int sgi_tioca_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}
static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
				   int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		return -EINVAL;
		break;
	default:
		num_entries = 0;
		break;
	}

	num_entries -= agp_memory_reserved / PAGE_SIZE;
	if (num_entries < 0)
		num_entries = 0;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}

	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	while (j < (pg_start + mem->page_count)) {
		if (*(bridge->gatt_table + j))
			return -EBUSY;
		j++;
	}

	if (mem->is_flushed == FALSE) {
		bridge->driver->cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		*(bridge->gatt_table + j) =
		    bridge->driver->mask_memory(bridge, mem->memory[i],
						mem->type);
	}

	bridge->driver->tlb_flush(mem);
	return 0;
}
static int sgi_tioca_remove_memory(struct agp_memory *mem, off_t pg_start,
				   int type)
{
	size_t i;
	struct agp_bridge_data *bridge;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		*(bridge->gatt_table + i) = 0;
	}

	bridge->driver->tlb_flush(mem);
	return 0;
}

static void sgi_tioca_cache_flush(void)
{
}

/*
 * Cleanup. Nothing to do as the CA driver owns the GART.
 */
static void sgi_tioca_cleanup(void)
{
}
static struct agp_bridge_data *sgi_tioca_find_bridge(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge;

	list_for_each_entry(bridge, &agp_bridges, list) {
		if (bridge->dev->bus == pdev->bus)
			break;
	}
	return bridge;
}

struct agp_bridge_driver sgi_tioca_driver = {
	.owner = THIS_MODULE,
	.size_type = U16_APER_SIZE,
	.configure = sgi_tioca_configure,
	.fetch_size = sgi_tioca_fetch_size,
	.cleanup = sgi_tioca_cleanup,
	.tlb_flush = sgi_tioca_tlbflush,
	.mask_memory = sgi_tioca_mask_memory,
	.agp_enable = sgi_tioca_agp_enable,
	.cache_flush = sgi_tioca_cache_flush,
	.create_gatt_table = sgi_tioca_create_gatt_table,
	.free_gatt_table = sgi_tioca_free_gatt_table,
	.insert_memory = sgi_tioca_insert_memory,
	.remove_memory = sgi_tioca_remove_memory,
	.alloc_by_type = agp_generic_alloc_by_type,
	.free_by_type = agp_generic_free_by_type,
	.agp_alloc_page = sgi_tioca_alloc_page,
	.agp_destroy_page = agp_generic_destroy_page,
	.cant_use_aperture = 1,
	.needs_scratch_page = 0,
	.num_aperture_sizes = 1,
};
static int __devinit agp_sgi_init(void)
{
	unsigned int j;
	struct tioca_kernel *info;
	struct pci_dev *pdev = NULL;

	if (tioca_gart_found)
		printk(KERN_INFO PFX "SGI TIO CA GART driver initialized.\n");
	else
		return 0;

	sgi_tioca_agp_bridges =
	    (struct agp_bridge_data **)kmalloc(tioca_gart_found *
					       sizeof(struct agp_bridge_data *),
					       GFP_KERNEL);

	j = 0;
	list_for_each_entry(info, &tioca_list, ca_list) {
		struct list_head *tmp;

		list_for_each(tmp, info->ca_devices) {
			u8 cap_ptr;
			pdev = pci_dev_b(tmp);
			if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
				continue;
			cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
			if (!cap_ptr)
				continue;
		}
		sgi_tioca_agp_bridges[j] = agp_alloc_bridge();
		printk(KERN_INFO PFX "bridge %d = 0x%p\n", j,
		       sgi_tioca_agp_bridges[j]);
		if (sgi_tioca_agp_bridges[j]) {
			sgi_tioca_agp_bridges[j]->dev = pdev;
			sgi_tioca_agp_bridges[j]->dev_private_data = info;
			sgi_tioca_agp_bridges[j]->driver = &sgi_tioca_driver;
			sgi_tioca_agp_bridges[j]->gart_bus_addr =
			    info->ca_gfxap_base;
			sgi_tioca_agp_bridges[j]->mode = (0x7D << 24) |	/* 126 requests */
			    (0x1 << 9) |	/* SBA supported */
			    (0x1 << 5) |	/* 64-bit addresses supported */
			    (0x1 << 4) |	/* FW supported */
			    (0x1 << 3) |	/* AGP 3.0 mode */
			    0x2;		/* 8x transfer only */
			sgi_tioca_agp_bridges[j]->current_size =
			    sgi_tioca_agp_bridges[j]->previous_size =
			    (void *)&sgi_tioca_sizes[0];
			agp_add_bridge(sgi_tioca_agp_bridges[j]);
		}
		j++;
	}

	agp_find_bridge = &sgi_tioca_find_bridge;
	return 0;
}

static void __devexit agp_sgi_cleanup(void)
{
	if (sgi_tioca_agp_bridges)
		kfree(sgi_tioca_agp_bridges);
	sgi_tioca_agp_bridges = NULL;
}

module_init(agp_sgi_init);
module_exit(agp_sgi_cleanup);

MODULE_LICENSE("GPL and additional rights");
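Editorial note: the mode word built in agp_sgi_init() packs the AGP status/command fields by hand. A small decode of the same constant, using the bit positions the driver's own comments document (the field names follow the AGP 3.0 register layout and are an assumption here, not taken from this driver):

```c
/* Decode of the AGP mode word hard-coded in agp_sgi_init() above,
 * using the bit positions annotated in the driver's comments. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mode = (0x7D << 24) |	/* RQ depth field */
	    (0x1 << 9) |		/* SBA supported */
	    (0x1 << 5) |		/* 64-bit addresses supported */
	    (0x1 << 4) |		/* FW supported */
	    (0x1 << 3) |		/* AGP 3.0 mode */
	    0x2;			/* 8x transfer only */

	printf("RQ field 0x%x (the driver's comment reads this as 126 requests)\n",
	       mode >> 24);
	printf("SBA=%u over4G=%u FW=%u agp3=%u rate=0x%x\n",
	       (mode >> 9) & 1, (mode >> 5) & 1, (mode >> 4) & 1,
	       (mode >> 3) & 1, mode & 0x7);
	return 0;
}
```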
@@ -27,8 +27,6 @@
 #include <linux/if_fc.h>

 #ifdef __KERNEL__
-extern unsigned short fc_type_trans(struct sk_buff *skb, struct net_device *dev);
-
 extern struct net_device *alloc_fcdev(int sizeof_priv);
 #endif
......
@@ -182,19 +182,15 @@ void clear_page_range(struct mmu_gather *tlb,
 		unsigned long addr, unsigned long end)
 {
 	pgd_t *pgd;
-	unsigned long i, next;
+	unsigned long next;

 	pgd = pgd_offset(tlb->mm, addr);
-	for (i = pgd_index(addr); i <= pgd_index(end-1); i++) {
+	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
 		clear_pud_range(tlb, pgd, addr, next);
-		pgd++;
-		addr = next;
-		if (addr == end)
-			break;
-	}
+	} while (pgd++, addr = next, addr != end);
 }

 pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
......
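Editorial note: the rewritten clear_page_range() loop relies on the comma operator: the while condition advances the table pointer and addr before testing for the end, so a continue re-enters the walk without skipping the increment. A standalone sketch of the same control flow (addr_end() is a simplified stand-in for pgd_addr_end(); values invented):

```c
/* Standalone sketch of the do/while page-table-walk idiom above.
 * addr_end() stands in for pgd_addr_end(): next boundary, clamped to end. */
#include <stdio.h>

#define BOUNDARY 0x1000UL

static unsigned long addr_end(unsigned long addr, unsigned long end)
{
	unsigned long next = (addr + BOUNDARY) & ~(BOUNDARY - 1);

	return next < end ? next : end;
}

int main(void)
{
	unsigned long addr = 0x0800, end = 0x3800, next;
	int pgd = 0;	/* stands in for the pgd pointer */

	do {
		next = addr_end(addr, end);
		printf("entry %d covers [%#lx, %#lx)\n", pgd, addr, next);
	} while (pgd++, addr = next, addr != end);

	return 0;
}
```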
@@ -97,40 +97,6 @@ static int fc_rebuild_header(struct sk_buff *skb)
 #endif
 }

-unsigned short
-fc_type_trans(struct sk_buff *skb, struct net_device *dev)
-{
-	struct fch_hdr *fch = (struct fch_hdr *)skb->data;
-	struct fcllc *fcllc;
-
-	skb->mac.raw = skb->data;
-	fcllc = (struct fcllc *)(skb->data + sizeof (struct fch_hdr) + 2);
-	skb_pull(skb, sizeof (struct fch_hdr) + 2);
-
-	if (*fch->daddr & 1) {
-		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN))
-			skb->pkt_type = PACKET_BROADCAST;
-		else
-			skb->pkt_type = PACKET_MULTICAST;
-	} else if (dev->flags & IFF_PROMISC) {
-		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN))
-			skb->pkt_type = PACKET_OTHERHOST;
-	}
-
-	/*
-	 * Strip the SNAP header from ARP packets since we don't pass
-	 * them through to the 802.2/SNAP layers.
-	 */
-	if (fcllc->dsap == EXTENDED_SAP &&
-	    (fcllc->ethertype == ntohs(ETH_P_IP) ||
-	     fcllc->ethertype == ntohs(ETH_P_ARP))) {
-		skb_pull(skb, sizeof (struct fcllc));
-		return fcllc->ethertype;
-	}
-
-	return ntohs(ETH_P_802_2);
-}
-
 static void fc_setup(struct net_device *dev)
 {
 	dev->hard_header = fc_header;
......
@@ -3343,6 +3343,7 @@ EXPORT_SYMBOL(unregister_netdevice);
 EXPORT_SYMBOL(unregister_netdevice_notifier);
 EXPORT_SYMBOL(net_enable_timestamp);
 EXPORT_SYMBOL(net_disable_timestamp);
+EXPORT_SYMBOL(dev_get_flags);

 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
 EXPORT_SYMBOL(br_handle_frame_hook);
......
@@ -1868,7 +1868,7 @@ static inline unsigned int dn_current_mss(struct sock *sk, int flags)
 	/* This works out the maximum size of segment we can send out */
 	if (dst) {
-		u32 mtu = dst_pmtu(dst);
+		u32 mtu = dst_mtu(dst);

 		mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
 	}
......
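Editorial note: from here on, every dst_pmtu() call in this merge becomes dst_mtu(), and the "- dst->header_len" adjustments disappear with it. As used in these hunks, the accessor simply reads the route's MTU metric; a simplified sketch of that pattern (stand-in types and constant, not the kernel's definitions):

```c
/* Sketch of the metric-accessor pattern behind dst_mtu(). The struct and
 * RTAX_MTU value here are simplified stand-ins for illustration. */
#include <stdio.h>

#define RTAX_MTU 2	/* illustrative; not necessarily the kernel's value */

struct dst_entry {
	unsigned int metrics[16];
};

static unsigned int dst_mtu(const struct dst_entry *dst)
{
	return dst->metrics[RTAX_MTU - 1];
}

int main(void)
{
	struct dst_entry dst = { .metrics = { [RTAX_MTU - 1] = 1500 } };

	printf("mtu = %u\n", dst_mtu(&dst));	/* 1500 */
	return 0;
}
```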
@@ -817,7 +817,7 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
 		if (rt->u.dst.metrics[RTAX_MTU-1] == 0 ||
 		    rt->u.dst.metrics[RTAX_MTU-1] > rt->u.dst.dev->mtu)
 			rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
-		mss = dn_mss_from_pmtu(dev, dst_pmtu(&rt->u.dst));
+		mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst));
 		if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0 ||
 		    rt->u.dst.metrics[RTAX_ADVMSS-1] > mss)
 			rt->u.dst.metrics[RTAX_ADVMSS-1] = mss;
......
@@ -511,7 +511,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
 	/* change mtu on this route */
 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
-		if (rel_info > dst_pmtu(skb2->dst)) {
+		if (rel_info > dst_mtu(skb2->dst)) {
 			kfree_skb(skb2);
 			return;
 		}
@@ -764,9 +764,9 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 			df = tiph->frag_off;

 	if (df)
-		mtu = dst_pmtu(&rt->u.dst) - tunnel->hlen;
+		mtu = dst_mtu(&rt->u.dst) - tunnel->hlen;
 	else
-		mtu = skb->dst ? dst_pmtu(skb->dst) : dev->mtu;
+		mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;

 	if (skb->dst)
 		skb->dst->ops->update_pmtu(skb->dst, mtu);
@@ -785,7 +785,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	else if (skb->protocol == htons(ETH_P_IPV6)) {
 		struct rt6_info *rt6 = (struct rt6_info*)skb->dst;

-		if (rt6 && mtu < dst_pmtu(skb->dst) && mtu >= IPV6_MIN_MTU) {
+		if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) {
 			if ((tunnel->parms.iph.daddr && !MULTICAST(tunnel->parms.iph.daddr)) ||
 			    rt6->rt6i_dst.plen == 128) {
 				rt6->rt6i_flags |= RTF_MODIFIED;
......
@@ -278,7 +278,7 @@ int ip_mc_output(struct sk_buff *skb)
 				newskb->dev, ip_dev_loopback_xmit);
 	}

-	if (skb->len > dst_pmtu(&rt->u.dst))
+	if (skb->len > dst_mtu(&rt->u.dst))
 		return ip_fragment(skb, ip_finish_output);
 	else
 		return ip_finish_output(skb);
@@ -288,7 +288,7 @@ int ip_output(struct sk_buff *skb)
 {
 	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

-	if (skb->len > dst_pmtu(skb->dst) && !skb_shinfo(skb)->tso_size)
+	if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->tso_size)
 		return ip_fragment(skb, ip_finish_output);
 	else
 		return ip_finish_output(skb);
@@ -448,7 +448,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
 	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
-			  htonl(dst_pmtu(&rt->u.dst)));
+			  htonl(dst_mtu(&rt->u.dst)));
 		kfree_skb(skb);
 		return -EMSGSIZE;
 	}
@@ -458,7 +458,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
 	 */

 	hlen = iph->ihl * 4;
-	mtu = dst_pmtu(&rt->u.dst) - hlen;	/* Size of data space */
+	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */

 	/* When frag_list is given, use it. First, check its validity:
 	 * some transformers could create wrong frag_list or break existing
@@ -719,7 +719,6 @@ int ip_append_data(struct sock *sk,
 	struct ip_options *opt = NULL;
 	int hh_len;
-	int exthdrlen;
 	int mtu;
 	int copy;
 	int err;
@@ -746,22 +745,17 @@ int ip_append_data(struct sock *sk,
 			inet->cork.addr = ipc->addr;
 		}
 		dst_hold(&rt->u.dst);
-		inet->cork.fragsize = mtu = dst_pmtu(&rt->u.dst);
+		inet->cork.fragsize = mtu = dst_mtu(&rt->u.dst);
 		inet->cork.rt = rt;
 		inet->cork.length = 0;
 		sk->sk_sndmsg_page = NULL;
 		sk->sk_sndmsg_off = 0;
-		if ((exthdrlen = rt->u.dst.header_len) != 0) {
-			length += exthdrlen;
-			transhdrlen += exthdrlen;
-		}
 	} else {
 		rt = inet->cork.rt;
 		if (inet->cork.flags & IPCORK_OPT)
 			opt = inet->cork.opt;
 		transhdrlen = 0;
-		exthdrlen = 0;
 		mtu = inet->cork.fragsize;
 	}
 	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
@@ -770,7 +764,7 @@ int ip_append_data(struct sock *sk,
 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

 	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
-		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
+		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
 		return -EMSGSIZE;
 	}
@@ -781,7 +775,7 @@ int ip_append_data(struct sock *sk,
 	if (transhdrlen &&
 	    length + fragheaderlen <= mtu &&
 	    rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
-	    !exthdrlen)
+	    !rt->u.dst.header_len)
 		csummode = CHECKSUM_HW;

 	inet->cork.length += length;
@@ -866,9 +860,9 @@ int ip_append_data(struct sock *sk,
 		 * Find where to start putting bytes.
 		 */
 		data = skb_put(skb, fraglen);
-		skb->nh.raw = data + exthdrlen;
+		skb->nh.raw = data;
 		data += fragheaderlen;
-		skb->h.raw = data + exthdrlen;
+		skb->h.raw = data;
 		if (fraggap) {
 			skb->csum = skb_copy_and_csum_bits(
@@ -890,7 +884,6 @@ int ip_append_data(struct sock *sk,
 		offset += copy;
 		length -= datalen - fraggap;
 		transhdrlen = 0;
-		exthdrlen = 0;
 		csummode = CHECKSUM_NONE;

 		/*
......
@@ -955,7 +955,7 @@ int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
 		val = 0;
 		dst = sk_dst_get(sk);
 		if (dst) {
-			val = dst_pmtu(dst) - dst->header_len;
+			val = dst_mtu(dst);
 			dst_release(dst);
 		}
 		if (!val) {
......
@@ -436,7 +436,7 @@ static void ipip_err(struct sk_buff *skb, void *__unused)
 	/* change mtu on this route */
 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
-		if (rel_info > dst_pmtu(skb2->dst)) {
+		if (rel_info > dst_mtu(skb2->dst)) {
 			kfree_skb(skb2);
 			return;
 		}
@@ -569,9 +569,9 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	}

 	if (tiph->frag_off)
-		mtu = dst_pmtu(&rt->u.dst) - sizeof(struct iphdr);
+		mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
 	else
-		mtu = skb->dst ? dst_pmtu(skb->dst) : dev->mtu;
+		mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;

 	if (mtu < 68) {
 		tunnel->stat.collisions++;
......
@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *skb)
 int ip6_output(struct sk_buff *skb)
 {
-	if (skb->len > dst_pmtu(skb->dst) || dst_allfrag(skb->dst))
+	if (skb->len > dst_mtu(skb->dst) || dst_allfrag(skb->dst))
 		return ip6_fragment(skb, ip6_output2);
 	else
 		return ip6_output2(skb);
@@ -263,7 +263,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
 	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
 	ipv6_addr_copy(&hdr->daddr, first_hop);

-	mtu = dst_pmtu(dst);
+	mtu = dst_mtu(dst);
 	if ((skb->len <= mtu) || ipfragok) {
 		IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
 		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute);
@@ -429,10 +429,10 @@ int ip6_forward(struct sk_buff *skb)
 		goto error;
 	}

-	if (skb->len > dst_pmtu(dst)) {
+	if (skb->len > dst_mtu(dst)) {
 		/* Again, force OUTPUT device used as source address */
 		skb->dev = dst->dev;
-		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_pmtu(dst), skb->dev);
+		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
 		IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS);
 		IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
 		kfree_skb(skb);
@@ -535,7 +535,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 	hlen = ip6_find_1stfragopt(skb, &prevhdr);
 	nexthdr = *prevhdr;

-	mtu = dst_pmtu(&rt->u.dst) - hlen - sizeof(struct frag_hdr);
+	mtu = dst_mtu(&rt->u.dst) - hlen - sizeof(struct frag_hdr);

 	if (skb_shinfo(skb)->frag_list) {
 		int first_len = skb_pagelen(skb);
@@ -850,13 +850,13 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offse
 		np->cork.rt = rt;
 		inet->cork.fl = *fl;
 		np->cork.hop_limit = hlimit;
-		inet->cork.fragsize = mtu = dst_pmtu(&rt->u.dst);
+		inet->cork.fragsize = mtu = dst_mtu(&rt->u.dst);
 		if (dst_allfrag(&rt->u.dst))
 			inet->cork.flags |= IPCORK_ALLFRAG;
 		inet->cork.length = 0;
 		sk->sk_sndmsg_page = NULL;
 		sk->sk_sndmsg_off = 0;
-		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
+		exthdrlen = opt ? opt->opt_flen : 0;
 		length += exthdrlen;
 		transhdrlen += exthdrlen;
 	} else {
......
@@ -689,14 +689,14 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 			t->parms.name);
 		goto tx_err_dst_release;
 	}
-	mtu = dst_pmtu(dst) - sizeof (*ipv6h);
+	mtu = dst_mtu(dst) - sizeof (*ipv6h);
 	if (opt) {
 		max_headroom += 8;
 		mtu -= 8;
 	}
 	if (mtu < IPV6_MIN_MTU)
 		mtu = IPV6_MIN_MTU;
-	if (skb->dst && mtu < dst_pmtu(skb->dst)) {
+	if (skb->dst && mtu < dst_mtu(skb->dst)) {
 		struct rt6_info *rt = (struct rt6_info *) skb->dst;
 		rt->rt6i_flags |= RTF_MODIFIED;
 		rt->u.dst.metrics[RTAX_MTU-1] = mtu;
......
@@ -607,7 +607,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
 		lock_sock(sk);
 		dst = sk_dst_get(sk);
 		if (dst) {
-			val = dst_pmtu(dst) - dst->header_len;
+			val = dst_mtu(dst);
 			dst_release(dst);
 		}
 		release_sock(sk);
......
@@ -625,7 +625,7 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 	struct rt6_info *rt6 = (struct rt6_info*)dst;

-	if (mtu < dst_pmtu(dst) && rt6->rt6i_dst.plen == 128) {
+	if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
 		rt6->rt6i_flags |= RTF_MODIFIED;
 		if (mtu < IPV6_MIN_MTU) {
 			mtu = IPV6_MIN_MTU;
@@ -686,7 +686,7 @@ struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
 	atomic_set(&rt->u.dst.__refcnt, 1);
 	rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
 	rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
-	rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_pmtu(&rt->u.dst));
+	rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
 	rt->u.dst.output = output;

 #if 0	/* there's no chance to use these for ndisc */
@@ -971,7 +971,7 @@ int ip6_route_add(struct in6_rtmsg *rtmsg, struct nlmsghdr *nlh, void *_rtattr)
 	if (!rt->u.dst.metrics[RTAX_MTU-1])
 		rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
 	if (!rt->u.dst.metrics[RTAX_ADVMSS-1])
-		rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_pmtu(&rt->u.dst));
+		rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
 	rt->u.dst.dev = dev;
 	rt->rt6i_idev = idev;
 	return ip6_ins_rt(rt, nlh, _rtattr);
@@ -1134,7 +1134,7 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *saddr,
 	nrt->rt6i_nexthop = neigh_clone(neigh);
 	/* Reset pmtu, it may be better */
 	nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
-	nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_pmtu(&nrt->u.dst));
+	nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&nrt->u.dst));

 	if (ip6_ins_rt(nrt, NULL, NULL))
 		goto out;
@@ -1164,7 +1164,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
 	if (rt == NULL)
 		return;

-	if (pmtu >= dst_pmtu(&rt->u.dst))
+	if (pmtu >= dst_mtu(&rt->u.dst))
 		goto out;

 	if (pmtu < IPV6_MIN_MTU) {
@@ -1405,7 +1405,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 	rt->rt6i_dev = &loopback_dev;
 	rt->rt6i_idev = idev;
 	rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
-	rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_pmtu(&rt->u.dst));
+	rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
 	rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
 	rt->u.dst.obsolete = -1;
@@ -1480,9 +1480,9 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
 	 */
 	if (rt->rt6i_dev == arg->dev &&
 	    !dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
-	    (dst_pmtu(&rt->u.dst) > arg->mtu ||
-	     (dst_pmtu(&rt->u.dst) < arg->mtu &&
-	      dst_pmtu(&rt->u.dst) == idev->cnf.mtu6)))
+	    (dst_mtu(&rt->u.dst) > arg->mtu ||
+	     (dst_mtu(&rt->u.dst) < arg->mtu &&
+	      dst_mtu(&rt->u.dst) == idev->cnf.mtu6)))
 		rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
 	rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(arg->mtu);
 	return 0;
......
@@ -500,9 +500,9 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	}

 	if (tiph->frag_off)
-		mtu = dst_pmtu(&rt->u.dst) - sizeof(struct iphdr);
+		mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
 	else
-		mtu = skb->dst ? dst_pmtu(skb->dst) : dev->mtu;
+		mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;

 	if (mtu < 68) {
 		tunnel->stat.collisions++;
......
@@ -227,7 +227,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport)
 	dst = transport->af_specific->get_dst(NULL, &transport->ipaddr, NULL);

 	if (dst) {
-		transport->pmtu = dst_pmtu(dst);
+		transport->pmtu = dst_mtu(dst);
 		dst_release(dst);
 	} else
 		transport->pmtu = SCTP_DEFAULT_MAXSEGMENT;
@@ -253,7 +253,7 @@ void sctp_transport_route(struct sctp_transport *transport,
 	transport->dst = dst;
 	if (dst) {
-		transport->pmtu = dst_pmtu(dst);
+		transport->pmtu = dst_mtu(dst);

 		/* Initialize sk->sk_rcv_saddr, if the transport is the
 		 * association's active path for getsockname().
......
@@ -1119,12 +1119,12 @@ void xfrm_init_pmtu(struct dst_entry *dst)
 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
 		u32 pmtu, route_mtu_cached;

-		pmtu = dst_pmtu(dst->child);
+		pmtu = dst_mtu(dst->child);
 		xdst->child_mtu_cached = pmtu;

 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

-		route_mtu_cached = dst_pmtu(xdst->route);
+		route_mtu_cached = dst_mtu(xdst->route);
 		xdst->route_mtu_cached = route_mtu_cached;

 		if (pmtu > route_mtu_cached)
@@ -1160,7 +1160,7 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
 		if (dst->xfrm->km.state != XFRM_STATE_VALID)
 			return 0;

-		mtu = dst_pmtu(dst->child);
+		mtu = dst_mtu(dst->child);
 		if (xdst->child_mtu_cached != mtu) {
 			last = xdst;
 			xdst->child_mtu_cached = mtu;
@@ -1168,7 +1168,7 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
 		if (!dst_check(xdst->route, 0))
 			return 0;

-		mtu = dst_pmtu(xdst->route);
+		mtu = dst_mtu(xdst->route);
 		if (xdst->route_mtu_cached != mtu) {
 			last = xdst;
 			xdst->route_mtu_cached = mtu;
......
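Editorial note: xfrm_init_pmtu() above caches two MTUs per bundle layer, the child path's (reduced by transform overhead) and the route's, and keeps the smaller. A toy single-layer version of that clamping (numbers invented; the real xfrm_state_mtu() also rounds for the cipher's block and padding requirements):

```c
/* Toy version of the per-layer MTU clamping in xfrm_init_pmtu().
 * The overhead value and MTUs are invented for illustration. */
#include <stdio.h>

static unsigned int xfrm_state_mtu_sketch(unsigned int mtu,
					  unsigned int overhead)
{
	return mtu - overhead;	/* stand-in: the real helper is per-algorithm */
}

int main(void)
{
	unsigned int child_mtu = 1500, route_mtu = 1420, esp_overhead = 56;
	unsigned int pmtu = xfrm_state_mtu_sketch(child_mtu, esp_overhead);

	if (pmtu > route_mtu)	/* never exceed the route's own MTU */
		pmtu = route_mtu;
	printf("bundle pmtu = %u\n", pmtu);	/* 1420 */
	return 0;
}
```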