Release 4.11 net/ipv4/tcp_scalable.c
/* Tom Kelly's Scalable TCP
*
* See http://www.deneholme.net/tom/scalable/
*
* John Heffner <jheffner@sc.edu>
*/
#include <linux/module.h>
#include <net/tcp.h>
/* These factors are derived from the recommended values in the paper:
 * .01 and 7/8. We use 50 instead of 100 to account for
 * delayed ack.
 */
#define TCP_SCALABLE_AI_CNT 50U
#define TCP_SCALABLE_MD_SCALE 3
/* Per-socket private congestion-control state, kept in inet_csk_ca(sk). */
struct scalable {
	u32 loss_cwnd;	/* snd_cwnd captured at the last loss, used by undo */
};
/* Scalable TCP window growth: standard slow start, then an additive
 * increase whose per-RTT gain is bounded by TCP_SCALABLE_AI_CNT acks,
 * giving the constant-rate growth described in Kelly's paper.
 */
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Only grow cwnd while the sender is actually cwnd-limited. */
	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		tcp_slow_start(tp, acked);
		return;
	}

	/* Congestion avoidance: cap the AI divisor at TCP_SCALABLE_AI_CNT
	 * so growth stays constant-rate once cwnd exceeds it.
	 */
	tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT), 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Heffner | 35 | 52.24% | 1 | 12.50% |
Arnaldo Carvalho de Melo | 12 | 17.91% | 1 | 12.50% |
Stephen Hemminger | 8 | 11.94% | 2 | 25.00% |
Yuchung Cheng | 6 | 8.96% | 2 | 25.00% |
Ilpo Järvinen | 4 | 5.97% | 1 | 12.50% |
Neal Cardwell | 2 | 2.99% | 1 | 12.50% |
Total | 67 | 100.00% | 8 | 100.00% |
/* Slow-start threshold after loss: multiplicative decrease of 1/8
 * (cwnd - cwnd >> TCP_SCALABLE_MD_SCALE), floored at 2 packets.
 * Records the pre-loss cwnd so tcp_scalable_cwnd_undo() can restore it.
 */
static u32 tcp_scalable_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct scalable *ca = inet_csk_ca(sk);
	u32 cwnd = tp->snd_cwnd;

	/* Remember cwnd at the loss event for a possible later undo. */
	ca->loss_cwnd = cwnd;

	return max(cwnd - (cwnd >> TCP_SCALABLE_MD_SCALE), 2U);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Heffner | 27 | 46.55% | 1 | 33.33% |
Florian Westphal | 18 | 31.03% | 1 | 33.33% |
Arnaldo Carvalho de Melo | 13 | 22.41% | 1 | 33.33% |
Total | 58 | 100.00% | 3 | 100.00% |
/* Undo a spurious cwnd reduction: return the larger of the current cwnd
 * and the cwnd saved at the (falsely detected) loss.
 */
static u32 tcp_scalable_cwnd_undo(struct sock *sk)
{
	const struct scalable *ca = inet_csk_ca(sk);
	u32 cur = tcp_sk(sk)->snd_cwnd;

	return max(cur, ca->loss_cwnd);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 37 | 100.00% | 1 | 100.00% |
Total | 37 | 100.00% | 1 | 100.00% |
/* Congestion-control ops vector registered with the TCP stack; hooks not
 * listed here fall back to the stack's defaults.
 */
static struct tcp_congestion_ops tcp_scalable __read_mostly = {
	.ssthresh	= tcp_scalable_ssthresh,
	.undo_cwnd	= tcp_scalable_cwnd_undo,
	.cong_avoid	= tcp_scalable_cong_avoid,
	.owner		= THIS_MODULE,
	.name		= "scalable",
};
/* Module init: register the "scalable" congestion-control algorithm.
 * Returns 0 on success or a negative errno from the registration.
 */
static int __init tcp_scalable_register(void)
{
	/* Our private state lives in inet_csk_ca(sk); fail the build if it
	 * ever outgrows the reserved area (same check as other CC modules).
	 */
	BUILD_BUG_ON(sizeof(struct scalable) > ICSK_CA_PRIV_SIZE);

	return tcp_register_congestion_control(&tcp_scalable);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Heffner | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
/* Module exit: unregister the "scalable" congestion-control algorithm. */
static void __exit tcp_scalable_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_scalable);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Heffner | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
module_init(tcp_scalable_register);
module_exit(tcp_scalable_unregister);

MODULE_AUTHOR("John Heffner");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Scalable TCP");
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
John Heffner | 160 | 58.18% | 1 | 9.09% |
Florian Westphal | 68 | 24.73% | 1 | 9.09% |
Arnaldo Carvalho de Melo | 25 | 9.09% | 1 | 9.09% |
Stephen Hemminger | 9 | 3.27% | 3 | 27.27% |
Yuchung Cheng | 6 | 2.18% | 2 | 18.18% |
Ilpo Järvinen | 4 | 1.45% | 1 | 9.09% |
Neal Cardwell | 2 | 0.73% | 1 | 9.09% |
Joe Perches | 1 | 0.36% | 1 | 9.09% |
Total | 275 | 100.00% | 11 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.