Commit 2d14da6a authored by Sridhar Samudrala

[SCTP] Fix to avoid large kmalloc failures on 64-bit platforms.

When spinlock debugging is enabled, the assoc hash table and the port
hash table, each fixed at 4096 entries, exceed the size of the largest
possible kmalloc() on 64-bit platforms. This is avoided by sizing the
tables from available memory and allocating them with page allocations,
following the methodology used for the tcp hash table allocations.
parent 0d5567da
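The sizing logic borrowed here from the tcp hash table setup can be illustrated with a small stand-alone C sketch. This is only an illustration under assumed values: PAGE_SHIFT, NUM_PHYSPAGES, and the bucket struct below are made-up stand-ins rather than kernel definitions, and the __get_free_pages() retry on allocation failure is only described in a comment.

/* Stand-alone sketch of the table sizing used in this patch.  Assumed
 * values: PAGE_SHIFT, NUM_PHYSPAGES and the bucket struct are
 * illustrative stand-ins, not kernel definitions.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define NUM_PHYSPAGES	(512UL * 1024)	/* pretend: 2GB of 4K pages */

struct hashbucket_sketch {
	unsigned long lock_words[8];	/* stands in for a debug-sized rwlock */
	void *chain;
};

int main(void)
{
	unsigned long goal, hashsize;
	int order;

	/* Aim the table at a small fraction of physical memory. */
	if (NUM_PHYSPAGES >= (128 * 1024))
		goal = NUM_PHYSPAGES >> (22 - PAGE_SHIFT);
	else
		goal = NUM_PHYSPAGES >> (24 - PAGE_SHIFT);

	/* Smallest power-of-two number of pages that covers the goal. */
	for (order = 0; (1UL << order) < goal; order++)
		;

	/* Derive the bucket count from whole pages; cap it at 64K entries.
	 * The kernel loop additionally retries __get_free_pages() with a
	 * smaller order when the allocation itself fails.
	 */
	do {
		hashsize = (1UL << order) * PAGE_SIZE /
				sizeof(struct hashbucket_sketch);
	} while ((hashsize > (64 * 1024)) && --order > 0);

	printf("order %d: %lu buckets of %zu bytes (%lu bytes total)\n",
	       order, hashsize, sizeof(struct hashbucket_sketch),
	       (1UL << order) * PAGE_SIZE);
	return 0;
}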
@@ -169,11 +169,11 @@ extern struct sctp_globals {
 	/* This is the hash of all endpoints. */
 	int ep_hashsize;
-	struct sctp_hashbucket *ep_hashbucket;
+	struct sctp_hashbucket *ep_hashtable;
 	/* This is the hash of all associations. */
 	int assoc_hashsize;
-	struct sctp_hashbucket *assoc_hashbucket;
+	struct sctp_hashbucket *assoc_hashtable;
 	/* This is the sctp port control hash. */
 	int port_hashsize;
@@ -207,9 +207,9 @@ extern struct sctp_globals {
 #define sctp_max_outstreams (sctp_globals.max_outstreams)
 #define sctp_address_families (sctp_globals.address_families)
 #define sctp_ep_hashsize (sctp_globals.ep_hashsize)
-#define sctp_ep_hashbucket (sctp_globals.ep_hashbucket)
+#define sctp_ep_hashtable (sctp_globals.ep_hashtable)
 #define sctp_assoc_hashsize (sctp_globals.assoc_hashsize)
-#define sctp_assoc_hashbucket (sctp_globals.assoc_hashbucket)
+#define sctp_assoc_hashtable (sctp_globals.assoc_hashtable)
 #define sctp_port_hashsize (sctp_globals.port_hashsize)
 #define sctp_port_rover (sctp_globals.port_rover)
 #define sctp_port_alloc_lock (sctp_globals.port_alloc_lock)
@@ -528,7 +528,7 @@ void __sctp_hash_endpoint(struct sctp_endpoint *ep)
 	epb = &ep->base;
 	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
-	head = &sctp_ep_hashbucket[epb->hashent];
+	head = &sctp_ep_hashtable[epb->hashent];
 	sctp_write_lock(&head->lock);
 	epp = &head->chain;
@@ -558,7 +558,7 @@ void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
-	head = &sctp_ep_hashbucket[epb->hashent];
+	head = &sctp_ep_hashtable[epb->hashent];
 	sctp_write_lock(&head->lock);
@@ -589,7 +589,7 @@ struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr)
 	int hash;
 	hash = sctp_ep_hashfn(laddr->v4.sin_port);
-	head = &sctp_ep_hashbucket[hash];
+	head = &sctp_ep_hashtable[hash];
 	read_lock(&head->lock);
 	for (epb = head->chain; epb; epb = epb->next) {
 		ep = sctp_ep(epb);
@@ -627,7 +627,7 @@ void __sctp_hash_established(struct sctp_association *asoc)
 	/* Calculate which chain this entry will belong to. */
 	epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, asoc->peer.port);
-	head = &sctp_assoc_hashbucket[epb->hashent];
+	head = &sctp_assoc_hashtable[epb->hashent];
 	sctp_write_lock(&head->lock);
 	epp = &head->chain;
@@ -658,7 +658,7 @@ void __sctp_unhash_established(struct sctp_association *asoc)
 	epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port,
					 asoc->peer.port);
-	head = &sctp_assoc_hashbucket[epb->hashent];
+	head = &sctp_assoc_hashtable[epb->hashent];
 	sctp_write_lock(&head->lock);
@@ -688,7 +688,7 @@ struct sctp_association *__sctp_lookup_association(
 	 * have wildcards anyways.
 	 */
 	hash = sctp_assoc_hashfn(local->v4.sin_port, peer->v4.sin_port);
-	head = &sctp_assoc_hashbucket[hash];
+	head = &sctp_assoc_hashtable[hash];
 	read_lock(&head->lock);
 	for (epb = head->chain; epb; epb = epb->next) {
 		asoc = sctp_assoc(epb);
@@ -172,7 +172,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
 	seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT LADDRS\n");
 	for (hash = 0; hash < sctp_ep_hashsize; hash++) {
-		head = &sctp_ep_hashbucket[hash];
+		head = &sctp_ep_hashtable[hash];
 		read_lock(&head->lock);
 		for (epb = head->chain; epb; epb = epb->next) {
 			ep = sctp_ep(epb);
@@ -234,7 +234,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 	seq_printf(seq, " ASSOC SOCK STY SST ST HBKT LPORT RPORT "
			"LADDRS <-> RADDRS\n");
 	for (hash = 0; hash < sctp_assoc_hashsize; hash++) {
-		head = &sctp_assoc_hashbucket[hash];
+		head = &sctp_assoc_hashtable[hash];
 		read_lock(&head->lock);
 		for (epb = head->chain; epb; epb = epb->next) {
 			assoc = sctp_assoc(epb);
@@ -934,6 +934,8 @@ __init int sctp_init(void)
 {
 	int i;
 	int status = 0;
+	unsigned long goal;
+	int order;
 	/* SCTP_DEBUG sanity check. */
 	if (!sctp_sanity_check())
@@ -1017,52 +1019,75 @@ __init int sctp_init(void)
 	sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
 	sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
-	/* Allocate and initialize the association hash table. */
-	sctp_assoc_hashsize = 4096;
-	sctp_assoc_hashbucket = (struct sctp_hashbucket *)
-		kmalloc(4096 * sizeof(struct sctp_hashbucket), GFP_KERNEL);
-	if (!sctp_assoc_hashbucket) {
+	/* Size and allocate the association hash table.
+	 * The methodology is similar to that of the tcp hash tables.
+	 */
+	if (num_physpages >= (128 * 1024))
+		goal = num_physpages >> (22 - PAGE_SHIFT);
+	else
+		goal = num_physpages >> (24 - PAGE_SHIFT);
+	for (order = 0; (1UL << order) < goal; order++)
+		;
+	do {
+		sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE /
+					sizeof(struct sctp_hashbucket);
+		if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0)
+			continue;
+		sctp_assoc_hashtable = (struct sctp_hashbucket *)
+					__get_free_pages(GFP_ATOMIC, order);
+	} while (!sctp_assoc_hashtable && --order > 0);
+	if (!sctp_assoc_hashtable) {
 		printk(KERN_ERR "SCTP: Failed association hash alloc.\n");
 		status = -ENOMEM;
 		goto err_ahash_alloc;
 	}
 	for (i = 0; i < sctp_assoc_hashsize; i++) {
-		sctp_assoc_hashbucket[i].lock = RW_LOCK_UNLOCKED;
-		sctp_assoc_hashbucket[i].chain = NULL;
+		sctp_assoc_hashtable[i].lock = RW_LOCK_UNLOCKED;
+		sctp_assoc_hashtable[i].chain = NULL;
 	}
 	/* Allocate and initialize the endpoint hash table. */
 	sctp_ep_hashsize = 64;
-	sctp_ep_hashbucket = (struct sctp_hashbucket *)
+	sctp_ep_hashtable = (struct sctp_hashbucket *)
 		kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL);
-	if (!sctp_ep_hashbucket) {
+	if (!sctp_ep_hashtable) {
 		printk(KERN_ERR "SCTP: Failed endpoint_hash alloc.\n");
 		status = -ENOMEM;
 		goto err_ehash_alloc;
 	}
 	for (i = 0; i < sctp_ep_hashsize; i++) {
-		sctp_ep_hashbucket[i].lock = RW_LOCK_UNLOCKED;
-		sctp_ep_hashbucket[i].chain = NULL;
+		sctp_ep_hashtable[i].lock = RW_LOCK_UNLOCKED;
+		sctp_ep_hashtable[i].chain = NULL;
 	}
 	/* Allocate and initialize the SCTP port hash table. */
-	sctp_port_hashsize = 4096;
+	do {
+		sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
					sizeof(struct sctp_bind_hashbucket);
+		if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
+			continue;
 	sctp_port_hashtable = (struct sctp_bind_hashbucket *)
-		kmalloc(4096 * sizeof(struct sctp_bind_hashbucket),GFP_KERNEL);
+			__get_free_pages(GFP_ATOMIC, order);
+	} while (!sctp_port_hashtable && --order > 0);
 	if (!sctp_port_hashtable) {
 		printk(KERN_ERR "SCTP: Failed bind hash alloc.");
 		status = -ENOMEM;
 		goto err_bhash_alloc;
 	}
-	sctp_port_alloc_lock = SPIN_LOCK_UNLOCKED;
-	sctp_port_rover = sysctl_local_port_range[0] - 1;
 	for (i = 0; i < sctp_port_hashsize; i++) {
 		sctp_port_hashtable[i].lock = SPIN_LOCK_UNLOCKED;
 		sctp_port_hashtable[i].chain = NULL;
 	}
+	sctp_port_alloc_lock = SPIN_LOCK_UNLOCKED;
+	sctp_port_rover = sysctl_local_port_range[0] - 1;
+	printk(KERN_INFO "SCTP: Hash tables configured "
			 "(established %d bind %d)\n",
+		sctp_assoc_hashsize, sctp_port_hashsize);
 	sctp_sysctl_register();
 	INIT_LIST_HEAD(&sctp_address_families);
@@ -1096,11 +1121,15 @@ __init int sctp_init(void)
 err_v6_init:
 	sctp_sysctl_unregister();
 	list_del(&sctp_ipv4_specific.list);
-	kfree(sctp_port_hashtable);
+	free_pages((unsigned long)sctp_port_hashtable,
+		   get_order(sctp_port_hashsize *
+			     sizeof(struct sctp_bind_hashbucket)));
 err_bhash_alloc:
-	kfree(sctp_ep_hashbucket);
+	kfree(sctp_ep_hashtable);
 err_ehash_alloc:
-	kfree(sctp_assoc_hashbucket);
+	free_pages((unsigned long)sctp_assoc_hashtable,
+		   get_order(sctp_assoc_hashsize *
+			     sizeof(struct sctp_hashbucket)));
 err_ahash_alloc:
 	sctp_dbg_objcnt_exit();
 	sctp_proc_exit();
@@ -1136,9 +1165,13 @@ __exit void sctp_exit(void)
 	sctp_sysctl_unregister();
 	list_del(&sctp_ipv4_specific.list);
-	kfree(sctp_assoc_hashbucket);
-	kfree(sctp_ep_hashbucket);
-	kfree(sctp_port_hashtable);
+	free_pages((unsigned long)sctp_assoc_hashtable,
+		   get_order(sctp_assoc_hashsize *
+			     sizeof(struct sctp_hashbucket)));
+	kfree(sctp_ep_hashtable);
+	free_pages((unsigned long)sctp_port_hashtable,
+		   get_order(sctp_port_hashsize *
+			     sizeof(struct sctp_bind_hashbucket)));
 	kmem_cache_destroy(sctp_chunk_cachep);
 	kmem_cache_destroy(sctp_bucket_cachep);