Contributors: 6
Author |
Tokens |
Token Proportion |
Commits |
Commit Proportion |
Yuchung Cheng |
128 |
76.65% |
4 |
33.33% |
Linus Torvalds (pre-git) |
17 |
10.18% |
4 |
33.33% |
Daniel Borkmann |
16 |
9.58% |
1 |
8.33% |
David S. Miller |
3 |
1.80% |
1 |
8.33% |
Ilpo Järvinen |
2 |
1.20% |
1 |
8.33% |
Arnaldo Carvalho de Melo |
1 |
0.60% |
1 |
8.33% |
Total |
167 |
|
12 |
|
#ifndef _TCP_DCTCP_H
#define _TCP_DCTCP_H
/* Mirror the current CE state into the DEMAND_CWR ECN flag:
 * set it while the last packet was CE-marked, clear it otherwise.
 */
static inline void dctcp_ece_ack_cwr(struct sock *sk, u32 ce_state)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 flags = tp->ecn_flags & ~TCP_ECN_DEMAND_CWR;

	if (ce_state == 1)
		flags |= TCP_ECN_DEMAND_CWR;
	tp->ecn_flags = flags;
}
/* Minimal DCTCP CE state machine:
 *
 * S: 0 <- last pkt was non-CE
 *    1 <- last pkt was CE
 *
 * On a state transition, force an immediate ACK so the receiver's CE
 * signal reaches the sender promptly; a pending delayed ACK is flushed
 * first, carrying the *previous* CE state, before switching over.
 */
static inline void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
					u32 *prior_rcv_nxt, u32 *ce_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 next_state = (evt == CA_EVENT_ECN_IS_CE);

	if (next_state != *ce_state) {
		/* If an ACK is sitting in the delayed-ACK timer, send it now
		 * so it still reflects the prior CE state, then demand an
		 * immediate ACK for the new state.
		 */
		if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
			dctcp_ece_ack_cwr(sk, *ce_state);
			__tcp_send_ack(sk, *prior_rcv_nxt, 0);
		}
		icsk->icsk_ack.pending |= ICSK_ACK_NOW;
	}
	*prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
	*ce_state = next_state;
	dctcp_ece_ack_cwr(sk, next_state);
}
#endif