Commit 374d1125 authored by David S. Miller

Merge branch 'reciprocal'

Hannes Frederic Sowa says:

====================
reciprocal_divide update

This patch set is on top of aee636c4 ("bpf: do not use reciprocal
divide") from Eric, which sits in the net tree. It will not create a
merge conflict, but it does depend on that commit, so we suggest
merging net into net-next first, if possible.

We are proposing this change with only small modifications compared to
the v2 version, namely renaming trim to reciprocal_scale (as suggested
by Ben Hutchings and Eric Dumazet, thanks!).

We thought about introducing the new reciprocal_divide algorithm in
parallel to the one already used by the kernel, but faced organizational
issues that led us to the conclusion that it is best to simply replace
the old one: we could not come up with names for the two
implementations, nor with a way to describe their differences that
would guide developers on which one to choose in which situation. This
is because we cannot specify correct semantics for the version
currently used by the kernel. Although it does not seem to be causing
problems in the kernel today, we cannot say that with certainty for
flex_array going forward. Current usage seems fine, but future users
could run into problems.

Changelog:

v1->v2:
 - changed name to prandom_u32_max in p1
 - changed name to trim in p2
 - reworked code in p3
v2->v3:
 - p1 and p3 stay unchanged, only a small update to the commit
   message of p3
 - changed name to reciprocal_scale in p2
 - fixed kernel doc format
v3->v4:
 - pseduo -> pseudo (thanks to Tilman Schmidt)
v4->v5:
 - fix pseduo -> pseudo for real now, sorry for the noise
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6cd28f04 809fa972
......@@ -79,7 +79,6 @@
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_keys.h>
#include <linux/reciprocal_div.h>
#include "bonding.h"
#include "bond_3ad.h"
#include "bond_alb.h"
......@@ -3596,8 +3595,9 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
*/
static u32 bond_rr_gen_slave_id(struct bonding *bond)
{
int packets_per_slave = bond->params.packets_per_slave;
u32 slave_id;
struct reciprocal_value reciprocal_packets_per_slave;
int packets_per_slave = bond->params.packets_per_slave;
switch (packets_per_slave) {
case 0:
......@@ -3607,8 +3607,10 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
slave_id = bond->rr_tx_counter;
break;
default:
reciprocal_packets_per_slave =
bond->params.reciprocal_packets_per_slave;
slave_id = reciprocal_divide(bond->rr_tx_counter,
packets_per_slave);
reciprocal_packets_per_slave);
break;
}
bond->rr_tx_counter++;
......@@ -4343,10 +4345,18 @@ static int bond_check_params(struct bond_params *params)
params->resend_igmp = resend_igmp;
params->min_links = min_links;
params->lp_interval = lp_interval;
if (packets_per_slave > 1)
params->packets_per_slave = reciprocal_value(packets_per_slave);
else
params->packets_per_slave = packets_per_slave;
params->packets_per_slave = packets_per_slave;
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
} else {
/* reciprocal_packets_per_slave is unused if
* packets_per_slave is 0 or 1, just initialize it
*/
params->reciprocal_packets_per_slave =
(struct reciprocal_value) { 0 };
}
if (primary) {
strncpy(params->primary, primary, IFNAMSIZ);
params->primary[IFNAMSIZ - 1] = 0;
......
......@@ -19,7 +19,6 @@
#include <linux/if_ether.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include "bonding.h"
int bond_get_slave(struct net_device *slave_dev, struct sk_buff *skb)
......@@ -452,9 +451,6 @@ static int bond_fill_info(struct sk_buff *skb,
goto nla_put_failure;
packets_per_slave = bond->params.packets_per_slave;
if (packets_per_slave > 1)
packets_per_slave = reciprocal_value(packets_per_slave);
if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
packets_per_slave))
goto nla_put_failure;
......
......@@ -16,7 +16,6 @@
#include <linux/netdevice.h>
#include <linux/rwlock.h>
#include <linux/rcupdate.h>
#include <linux/reciprocal_div.h>
#include "bonding.h"
int bond_option_mode_set(struct bonding *bond, int mode)
......@@ -671,11 +670,17 @@ int bond_option_packets_per_slave_set(struct bonding *bond,
pr_warn("%s: Warning: packets_per_slave has effect only in balance-rr mode\n",
bond->dev->name);
if (packets_per_slave > 1)
bond->params.packets_per_slave =
bond->params.packets_per_slave = packets_per_slave;
if (packets_per_slave > 0) {
bond->params.reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
else
bond->params.packets_per_slave = packets_per_slave;
} else {
/* reciprocal_packets_per_slave is unused if
* packets_per_slave is 0 or 1, just initialize it
*/
bond->params.reciprocal_packets_per_slave =
(struct reciprocal_value) { 0 };
}
return 0;
}
......
......@@ -39,7 +39,6 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/nsproxy.h>
#include <linux/reciprocal_div.h>
#include "bonding.h"
......@@ -1374,10 +1373,6 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
{
struct bonding *bond = to_bond(d);
unsigned int packets_per_slave = bond->params.packets_per_slave;
if (packets_per_slave > 1)
packets_per_slave = reciprocal_value(packets_per_slave);
return sprintf(buf, "%u\n", packets_per_slave);
}
......
......@@ -23,6 +23,8 @@
#include <linux/netpoll.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/reciprocal_div.h>
#include "bond_3ad.h"
#include "bond_alb.h"
......@@ -171,6 +173,7 @@ struct bond_params {
int resend_igmp;
int lp_interval;
int packets_per_slave;
struct reciprocal_value reciprocal_packets_per_slave;
};
struct bond_parm_tbl {
......
......@@ -13,20 +13,14 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/reciprocal_div.h>
#include <linux/if_team.h>
static u32 random_N(unsigned int N)
{
return reciprocal_divide(prandom_u32(), N);
}
static bool rnd_transmit(struct team *team, struct sk_buff *skb)
{
struct team_port *port;
int port_index;
port_index = random_N(team->en_port_count);
port_index = prandom_u32_max(team->en_port_count);
port = team_get_port_by_index_rcu(team, port_index);
if (unlikely(!port))
goto drop;
......
......@@ -2,6 +2,7 @@
#define _FLEX_ARRAY_H
#include <linux/types.h>
#include <linux/reciprocal_div.h>
#include <asm/page.h>
#define FLEX_ARRAY_PART_SIZE PAGE_SIZE
......@@ -22,7 +23,7 @@ struct flex_array {
int element_size;
int total_nr_elements;
int elems_per_part;
u32 reciprocal_elems;
struct reciprocal_value reciprocal_elems;
struct flex_array_part *parts[];
};
/*
......
......@@ -193,6 +193,25 @@ extern int _cond_resched(void);
(__x < 0) ? -__x : __x; \
})
/**
* reciprocal_scale - "scale" a value into range [0, ep_ro)
* @val: value
* @ep_ro: right open interval endpoint
*
* Perform a "reciprocal multiplication" in order to "scale" a value into
* range [0, ep_ro), where the upper interval endpoint is right-open.
* This is useful, e.g., for accessing an index of an array containing
* ep_ro elements. Think of it as sort of a modulus, only that
* the result isn't that of modulo. ;) Note that if initial input is a
* small value, then result will return 0.
*
* Return: a result based on val in interval [0, ep_ro).
*/
static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
return (u32)(((u64) val * ep_ro) >> 32);
}
#if defined(CONFIG_MMU) && \
(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
void might_fault(void);
......
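As an aside for readers new to the trick: reciprocal_scale() maps a full-range 32-bit value, such as a flow hash, onto an index in [0, ep_ro) with a single multiply and shift instead of a modulo. A minimal userspace sketch of the same arithmetic, with made-up bucket counts and input values purely for illustration (not part of the commit), could look like this:

#include <stdint.h>
#include <stdio.h>

/* same multiply-and-shift as the kernel's reciprocal_scale(): the high
 * 32 bits of val * ep_ro land in [0, ep_ro), provided val is roughly
 * uniform over the whole u32 range
 */
static uint32_t scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t buckets = 16;	/* e.g. slots of a small flow table */

	printf("%u\n", scale(0xfffffff0u, buckets));	/* 15: input near the top of u32 */
	printf("%u\n", scale(0x00000001u, buckets));	/* 0: small inputs map to 0 */
	return 0;
}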
......@@ -8,7 +8,6 @@
#include <uapi/linux/random.h>
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
......@@ -38,6 +37,23 @@ struct rnd_state {
u32 prandom_u32_state(struct rnd_state *state);
void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
/**
* prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
* @ep_ro: right open interval endpoint
*
* Returns a pseudo-random number that is in interval [0, ep_ro). Note
* that the result depends on the PRNG being well distributed in [0, ~0U]
* u32 space. Here we use a maximally equidistributed combined Tausworthe
* generator, that is, prandom_u32(). This is useful when requesting a
* random index of an array containing ep_ro elements, for example.
*
* Returns: pseudo-random number in interval [0, ep_ro)
*/
static inline u32 prandom_u32_max(u32 ep_ro)
{
return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
}
/*
* Handle minimum values for seeds
*/
......
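prandom_u32_max() is the same scaling applied to prandom_u32(): a random index is drawn without a modulo, and the result can never reach ep_ro. A throwaway userspace sketch (names are mine; rand() stands in for prandom_u32() only for this demo and is not a substitute for it) shows the shape of the helper and that the resulting slots come out roughly even:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-in for prandom_u32(); rand() is not a full 32-bit generator,
 * so combine two calls -- good enough for a demo, not for real use
 */
static uint32_t fake_prandom_u32(void)
{
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

static uint32_t u32_max_sketch(uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)fake_prandom_u32() * ep_ro) >> 32);
}

int main(void)
{
	enum { PORTS = 4 };
	unsigned long hist[PORTS] = { 0 };

	/* the index is always < PORTS, and with a well distributed source
	 * the four counters end up roughly equal
	 */
	for (int i = 0; i < 1000000; i++)
		hist[u32_max_sketch(PORTS)]++;

	for (int i = 0; i < PORTS; i++)
		printf("port %d: %lu\n", i, hist[i]);
	return 0;
}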
......@@ -4,29 +4,32 @@
#include <linux/types.h>
/*
* This file describes reciprocical division.
* This algorithm is based on the paper "Division by Invariant
* Integers Using Multiplication" by Torbjörn Granlund and Peter
* L. Montgomery.
*
* This optimizes the (A/B) problem, when A and B are two u32
* and B is a known value (but not known at compile time)
* The assembler implementation from Agner Fog, which this code is
* based on, can be found here:
* http://www.agner.org/optimize/asmlib.zip
*
* The math principle used is :
* Let RECIPROCAL_VALUE(B) be (((1LL << 32) + (B - 1))/ B)
* Then A / B = (u32)(((u64)(A) * (R)) >> 32)
*
* This replaces a divide by a multiply (and a shift), and
* is generally less expensive in CPU cycles.
* This optimization for A/B is helpful if the divisor B is mostly
* runtime invariant. The reciprocal of B is calculated in the
* slow-path with reciprocal_value(). The fast-path can then just use
* a much faster multiplication operation with a variable dividend A
* to calculate the division A/B.
*/
/*
* Computes the reciprocal value (R) for the value B of the divisor.
* Should not be called before each reciprocal_divide(),
* or else the performance is slower than a normal divide.
*/
extern u32 reciprocal_value(u32 B);
struct reciprocal_value {
u32 m;
u8 sh1, sh2;
};
struct reciprocal_value reciprocal_value(u32 d);
static inline u32 reciprocal_divide(u32 A, u32 R)
static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
{
return (u32)(((u64)A * R) >> 32);
u32 t = (u32)(((u64)a * R.m) >> 32);
return (t + ((a - t) >> R.sh1)) >> R.sh2;
}
#endif
#endif /* _LINUX_RECIPROCAL_DIV_H */
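The intended usage split is: compute reciprocal_value() once on the slow path (configuration time), then call reciprocal_divide() on every packet. A self-contained userspace sketch of that pattern, using local stand-ins for the kernel helpers and an arbitrary divisor of 1000 (my own choice, not from the patch), also checks that the new multiply/shift sequence returns the exact quotient a / d:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

struct recip { uint32_t m; uint8_t sh1, sh2; };

/* slow path: mirrors the new reciprocal_value() from lib/reciprocal_div.c;
 * this sketch assumes d >= 2 (fls(0) handling is omitted)
 */
static struct recip recip_value(uint32_t d)
{
	struct recip R;
	int l = 32 - __builtin_clz(d - 1);		/* fls(d - 1) */
	uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;

	R.m = (uint32_t)m;
	R.sh1 = l > 1 ? 1 : l;				/* min(l, 1) */
	R.sh2 = l > 0 ? l - 1 : 0;			/* max(l - 1, 0) */
	return R;
}

/* fast path: mirrors the new reciprocal_divide() */
static uint32_t recip_divide(uint32_t a, struct recip R)
{
	uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

	return (t + ((a - t) >> R.sh1)) >> R.sh2;
}

int main(void)
{
	uint32_t d = 1000;			/* runtime-invariant divisor */
	struct recip R = recip_value(d);	/* computed once, up front */

	for (uint32_t a = 0; a < 2000000; a += 997)
		assert(recip_divide(a, R) == a / d);	/* exact quotient */

	puts("ok");
	return 0;
}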
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H
#include <linux/reciprocal_div.h>
/*
* Definitions unique to the original Linux SLAB allocator.
*/
......@@ -12,7 +14,7 @@ struct kmem_cache {
unsigned int shared;
unsigned int size;
u32 reciprocal_buffer_size;
struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */
unsigned int flags; /* constant flags */
......
......@@ -46,7 +46,6 @@
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <linux/reciprocal_div.h>
/* Controlling Queue Delay (CoDel) algorithm
* =========================================
......@@ -211,10 +210,9 @@ static codel_time_t codel_control_law(codel_time_t t,
codel_time_t interval,
u32 rec_inv_sqrt)
{
return t + reciprocal_divide(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
}
static bool codel_should_drop(const struct sk_buff *skb,
struct Qdisc *sch,
struct codel_vars *vars,
......
......@@ -130,7 +130,8 @@ struct red_parms {
u32 qth_max; /* Max avg length threshold: Wlog scaled */
u32 Scell_max;
u32 max_P; /* probability, [0 .. 1.0] 32 scaled */
u32 max_P_reciprocal; /* reciprocal_value(max_P / qth_delta) */
/* reciprocal_value(max_P / qth_delta) */
struct reciprocal_value max_P_reciprocal;
u32 qth_delta; /* max_th - min_th */
u32 target_min; /* min_th + 0.4*(max_th - min_th) */
u32 target_max; /* min_th + 0.6*(max_th - min_th) */
......
......@@ -90,8 +90,8 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
{
struct flex_array *ret;
int elems_per_part = 0;
int reciprocal_elems = 0;
int max_size = 0;
struct reciprocal_value reciprocal_elems = { 0 };
if (element_size) {
elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
......@@ -119,6 +119,11 @@ EXPORT_SYMBOL(flex_array_alloc);
static int fa_element_to_part_nr(struct flex_array *fa,
unsigned int element_nr)
{
/*
* if element_size == 0 we don't get here, so we never touch
* the zeroed fa->reciprocal_elems, which would yield invalid
* results
*/
return reciprocal_divide(element_nr, fa->reciprocal_elems);
}
......
#include <linux/kernel.h>
#include <asm/div64.h>
#include <linux/reciprocal_div.h>
#include <linux/export.h>
u32 reciprocal_value(u32 k)
/*
* For a description of the algorithm please have a look at
* include/linux/reciprocal_div.h
*/
struct reciprocal_value reciprocal_value(u32 d)
{
u64 val = (1LL << 32) + (k - 1);
do_div(val, k);
return (u32)val;
struct reciprocal_value R;
u64 m;
int l;
l = fls(d - 1);
m = ((1ULL << 32) * ((1ULL << l) - d));
do_div(m, d);
++m;
R.m = (u32)m;
R.sh1 = min(l, 1);
R.sh2 = max(l - 1, 0);
return R;
}
EXPORT_SYMBOL(reciprocal_value);
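For a feel of the numbers, a hand-worked example (not taken from the patch): with d = 7 the new reciprocal_value() computes l = fls(6) = 3, m = (2^32 * (8 - 7)) / 7 + 1 = 613566757, sh1 = 1 and sh2 = 2; feeding a = 100 into reciprocal_divide() then gives t = (100 * 613566757) >> 32 = 14 and (14 + ((100 - 14) >> 1)) >> 2 = 57 >> 2 = 14, which is exactly 100 / 7. The old helper above, by contrast, had no exact semantics to describe; for instance reciprocal_value(1) truncated (1LL << 32) to a u32 of 0, so every subsequent reciprocal_divide() with that value returned 0.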
......@@ -88,7 +88,6 @@
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/reciprocal_div.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
......@@ -1262,7 +1261,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
return reciprocal_divide(skb->rxhash, num);
return reciprocal_scale(skb->rxhash, num);
}
static unsigned int fanout_demux_lb(struct packet_fanout *f,
......@@ -1289,7 +1288,7 @@ static unsigned int fanout_demux_rnd(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
return reciprocal_divide(prandom_u32(), num);
return prandom_u32_max(num);
}
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
......
......@@ -14,7 +14,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/reciprocal_div.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
......@@ -77,12 +76,6 @@ struct choke_sched_data {
struct sk_buff **tab;
};
/* deliver a random number between 0 and N - 1 */
static u32 random_N(unsigned int N)
{
return reciprocal_divide(prandom_u32(), N);
}
/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
......@@ -233,7 +226,7 @@ static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
int retrys = 3;
do {
*pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
*pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
skb = q->tab[*pidx];
if (skb)
return skb;
......
......@@ -91,7 +91,7 @@ struct netem_sched_data {
u64 rate;
s32 packet_overhead;
u32 cell_size;
u32 cell_size_reciprocal;
struct reciprocal_value cell_size_reciprocal;
s32 cell_overhead;
struct crndstate {
......@@ -725,9 +725,11 @@ static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
q->rate = r->rate;
q->packet_overhead = r->packet_overhead;
q->cell_size = r->cell_size;
q->cell_overhead = r->cell_overhead;
if (q->cell_size)
q->cell_size_reciprocal = reciprocal_value(q->cell_size);
q->cell_overhead = r->cell_overhead;
else
q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}
static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
......