cregit-Linux: how code gets into the kernel

Release 4.14 net/ipv4/tcp_probe.c

Directory: net/ipv4
/*
 * tcpprobe - Observe the TCP flow with kprobes.
 *
 * The idea for this came from Werner Almesberger's umlsim
 * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <net/net_namespace.h>

#include <net/tcp.h>

MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
MODULE_DESCRIPTION("TCP cwnd snooper");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.1");


static int port __read_mostly;
MODULE_PARM_DESC(port, "Port to match (0=all)");
module_param(port, int, 0);


static unsigned int bufsize __read_mostly = 4096;
MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)");
module_param(bufsize, uint, 0);


static unsigned int fwmark __read_mostly;
MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)");
module_param(fwmark, uint, 0);


static int full __read_mostly;
MODULE_PARM_DESC(full, "Full log (1=every ack packet received,  0=only cwnd changes)");
module_param(full, int, 0);


static const char procname[] = "tcpprobe";


struct tcp_log {
	ktime_t tstamp;
	union {
		struct sockaddr		raw;
		struct sockaddr_in	v4;
		struct sockaddr_in6	v6;
	}	src, dst;
	u16	length;
	u32	snd_nxt;
	u32	snd_una;
	u32	snd_wnd;
	u32	rcv_wnd;
	u32	snd_cwnd;
	u32	ssthresh;
	u32	srtt;
};

static struct {
	spinlock_t	lock;
	wait_queue_head_t wait;
	ktime_t		start;
	u32		lastcwnd;

	unsigned long	head, tail;
	struct tcp_log	*log;
} tcp_probe;


static inline int tcp_probe_used(void)
{
	return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1);
}

Contributors

Person                 Tokens     Prop  Commits  Commit Prop
Stephen Hemminger          23   88.46%        2       66.67%
David S. Miller             3   11.54%        1       33.33%
Total                      26  100.00%        3      100.00%


static inline int tcp_probe_avail(void)
{
	return bufsize - tcp_probe_used() - 1;
}

Contributors

Person                 Tokens     Prop  Commits  Commit Prop
Stephen Hemminger          17  100.00%        3      100.00%
Total                      17  100.00%        3      100.00%
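The two helpers above treat head and tail as indices into a power-of-two ring buffer, so the fill level can be computed with a mask of bufsize - 1 instead of a modulo, and one slot is deliberately kept free so a full buffer is distinguishable from an empty one. The following is a minimal user-space sketch of the same arithmetic, not part of the module; the names BUFSIZE, used and avail are local to the example.

/* Standalone illustration of the power-of-two ring-buffer index arithmetic. */
#include <stdio.h>

#define BUFSIZE 8	/* must be a power of two, as tcpprobe_init() enforces for bufsize */

static unsigned long head, tail;

static int used(void)  { return (head - tail) & (BUFSIZE - 1); }
static int avail(void) { return BUFSIZE - used() - 1; }

int main(void)
{
	int i;

	/* Produce five records, consume two, then inspect the counters. */
	for (i = 0; i < 5; i++)
		head = (head + 1) & (BUFSIZE - 1);
	for (i = 0; i < 2; i++)
		tail = (tail + 1) & (BUFSIZE - 1);

	printf("used=%d avail=%d\n", used(), avail());	/* prints: used=3 avail=4 */
	return 0;
}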

#define tcp_probe_copy_fl_to_si4(inet, si4, mem)		\
	do {							\
		si4.sin_family = AF_INET;			\
		si4.sin_port = inet->inet_##mem##port;		\
		si4.sin_addr.s_addr = inet->inet_##mem##addr;	\
	} while (0)						\

/*
 * Hook inserted to be called before each receive packet.
 * Note: arguments must match tcp_rcv_established()!
 */
static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
				 const struct tcphdr *th)
{
	unsigned int len = skb->len;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_sock *inet = inet_sk(sk);

	/* Only update if port or skb mark matches */
	if (((port == 0 && fwmark == 0) ||
	     ntohs(inet->inet_dport) == port ||
	     ntohs(inet->inet_sport) == port ||
	     (fwmark > 0 && skb->mark == fwmark)) &&
	    (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {

		spin_lock(&tcp_probe.lock);
		/* If log fills, just silently drop */
		if (tcp_probe_avail() > 1) {
			struct tcp_log *p = tcp_probe.log + tcp_probe.head;

			p->tstamp = ktime_get();
			switch (sk->sk_family) {
			case AF_INET:
				tcp_probe_copy_fl_to_si4(inet, p->src.v4, s);
				tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d);
				break;
			case AF_INET6:
				memset(&p->src.v6, 0, sizeof(p->src.v6));
				memset(&p->dst.v6, 0, sizeof(p->dst.v6));
#if IS_ENABLED(CONFIG_IPV6)
				p->src.v6.sin6_family = AF_INET6;
				p->src.v6.sin6_port = inet->inet_sport;
				p->src.v6.sin6_addr = inet6_sk(sk)->saddr;

				p->dst.v6.sin6_family = AF_INET6;
				p->dst.v6.sin6_port = inet->inet_dport;
				p->dst.v6.sin6_addr = sk->sk_v6_daddr;
#endif
				break;
			default:
				BUG();
			}

			p->length = len;
			p->snd_nxt = tp->snd_nxt;
			p->snd_una = tp->snd_una;
			p->snd_cwnd = tp->snd_cwnd;
			p->snd_wnd = tp->snd_wnd;
			p->rcv_wnd = tp->rcv_wnd;
			p->ssthresh = tcp_current_ssthresh(sk);
			p->srtt = tp->srtt_us >> 3;

			tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
		}
		tcp_probe.lastcwnd = tp->snd_cwnd;
		spin_unlock(&tcp_probe.lock);

		wake_up(&tcp_probe.wait);
	}

	jprobe_return();
}

Contributors

Person                 Tokens     Prop  Commits  Commit Prop
Stephen Hemminger         229   53.76%        5       31.25%
Eric Dumazet              109   25.59%        5       31.25%
Daniel Borkmann            80   18.78%        4       25.00%
Ilya V. Matveychikov        7    1.64%        1        6.25%
Vijay Subramanian           1    0.23%        1        6.25%
Total                     426  100.00%       16      100.00%

static struct jprobe tcp_jprobe = {
	.kp = {
		.symbol_name	= "tcp_rcv_established",
	},
	.entry	= jtcp_rcv_established,
};
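tcp_jprobe attaches jtcp_rcv_established() to tcp_rcv_established() through the jprobes API that still exists in 4.14: the handler is called with the same argument list as the probed function and must hand control back via jprobe_return(). Below is a stripped-down sketch of that pattern using only the calls seen in this file; the module, handler and probe names (my_jprobe, jmy_handler) are illustrative, not part of tcp_probe.

#include <linux/module.h>
#include <linux/kprobes.h>
#include <net/tcp.h>

/* Handler: must mirror the probed function's signature exactly. */
static void jmy_handler(struct sock *sk, struct sk_buff *skb,
			const struct tcphdr *th)
{
	pr_info("tcp_rcv_established: sk %p, skb len %u\n", sk, skb->len);
	jprobe_return();	/* mandatory: return control to the real function */
}

static struct jprobe my_jprobe = {
	.kp	= { .symbol_name = "tcp_rcv_established", },
	.entry	= jmy_handler,
};

static int __init my_init(void)
{
	return register_jprobe(&my_jprobe);
}

static void __exit my_exit(void)
{
	unregister_jprobe(&my_jprobe);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");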
static int tcpprobe_open(struct inode *inode, struct file *file)
{
	/* Reset (empty) log */
	spin_lock_bh(&tcp_probe.lock);
	tcp_probe.head = tcp_probe.tail = 0;
	tcp_probe.start = ktime_get();
	spin_unlock_bh(&tcp_probe.lock);

	return 0;
}

Contributors

Person                 Tokens     Prop  Commits  Commit Prop
Stephen Hemminger          53  100.00%        1      100.00%
Total                      53  100.00%        1      100.00%


static int tcpprobe_sprint(char *tbuf, int n)
{
	const struct tcp_log *p
		= tcp_probe.log + tcp_probe.tail;
	struct timespec64 ts
		= ktime_to_timespec64(ktime_sub(p->tstamp, tcp_probe.start));

	return scnprintf(tbuf, n,
			"%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n",
			(unsigned long)ts.tv_sec,
			(unsigned long)ts.tv_nsec,
			&p->src, &p->dst, p->length, p->snd_nxt, p->snd_una,
			p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt,
			p->rcv_wnd);
}

Contributors

Person                 Tokens     Prop  Commits  Commit Prop
Stephen Hemminger          98   86.73%        3       37.50%
Daniel Borkmann             7    6.19%        2       25.00%
Deepa Dinamani              5    4.42%        1       12.50%
Harvey Harrison             2    1.77%        1       12.50%
Vasiliy Kulikov             1    0.88%        1       12.50%
Total                     113  100.00%        8      100.00%


static ssize_t tcpprobe_read(struct file *file, char __user *buf,
			     size_t len, loff_t *ppos)
{
	int error = 0;
	size_t cnt = 0;

	if (!buf)
		return -EINVAL;

	while (cnt < len) {
		char tbuf[256];
		int width;

		/* Wait for data in buffer */
		error = wait_event_interruptible(tcp_probe.wait,
						 tcp_probe_used() > 0);
		if (error)
			break;

		spin_lock_bh(&tcp_probe.lock);
		if (tcp_probe.head == tcp_probe.tail) {
			/* multiple readers race? */
			spin_unlock_bh(&tcp_probe.lock);
			continue;
		}

		width = tcpprobe_sprint(tbuf, sizeof(tbuf));

		if (cnt + width < len)
			tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1);

		spin_unlock_bh(&tcp_probe.lock);

		/* if record greater than space available
		   return partial buffer (so far) */
		if (cnt + width >= len)
			break;

		if (copy_to_user(buf + cnt, tbuf, width))
			return -EFAULT;
		cnt += width;
	}

	return cnt == 0 ? error : cnt;
}

Contributors

Person                 Tokens     Prop  Commits  Commit Prop
Stephen Hemminger         180   92.31%        3       42.86%
Tom Quetchenbach           10    5.13%        1       14.29%
Roel Kluin                  2    1.03%        1       14.29%
James Morris                2    1.03%        1       14.29%
Daniel Borkmann             1    0.51%        1       14.29%
Total                     195  100.00%        7      100.00%
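Records are consumed by reading /proc/net/tcpprobe; tcpprobe_read() blocks until at least one record is available and returns whole lines in the format built by tcpprobe_sprint() (time, source, destination, length, snd_nxt, snd_una, snd_cwnd, ssthresh, snd_wnd, srtt, rcv_wnd). A minimal user-space consumer sketch follows; it is not part of the module, error handling is deliberately thin, and only the first seven fields are parsed.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/net/tcpprobe", "r");
	char line[256];
	double ts;
	char src[64], dst[64];
	unsigned int length, snd_nxt, snd_una, snd_cwnd;

	if (!f) {
		perror("fopen");
		return 1;
	}

	/* Each line: time src dst length snd_nxt snd_una snd_cwnd ssthresh snd_wnd srtt rcv_wnd */
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%lf %63s %63s %u %x %x %u",
			   &ts, src, dst, &length, &snd_nxt, &snd_una,
			   &snd_cwnd) == 7)
			printf("%.6f %s -> %s cwnd=%u\n", ts, src, dst, snd_cwnd);
	}

	fclose(f);
	return 0;
}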

static const struct file_operations tcpprobe_fops = {
	.owner	 = THIS_MODULE,
	.open	 = tcpprobe_open,
	.read    = tcpprobe_read,
	.llseek  = noop_llseek,
};
static __init int tcpprobe_init(void)
{
	int ret = -ENOMEM;

	/* Warning: if the function signature of tcp_rcv_established,
	 * has been changed, you also have to change the signature of
	 * jtcp_rcv_established, otherwise you end up right here!
	 */
	BUILD_BUG_ON(__same_type(tcp_rcv_established,
				 jtcp_rcv_established) == 0);

	init_waitqueue_head(&tcp_probe.wait);
	spin_lock_init(&tcp_probe.lock);

	if (bufsize == 0)
		return -EINVAL;

	bufsize = roundup_pow_of_two(bufsize);
	tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL);
	if (!tcp_probe.log)
		goto err0;

	if (!proc_create(procname, S_IRUSR, init_net.proc_net, &tcpprobe_fops))
		goto err0;

	ret = register_jprobe(&tcp_jprobe);
	if (ret)
		goto err1;

	pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n",
		port, fwmark, bufsize);
	return 0;
 err1:
	remove_proc_entry(procname, init_net.proc_net);
 err0:
	kfree(tcp_probe.log);
	return ret;
}

Contributors

Person                 Tokens     Prop  Commits  Commit Prop
Stephen Hemminger         128   80.00%        3       33.33%
Daniel Borkmann            16   10.00%        2       22.22%
Gao Feng                   10    6.25%        2       22.22%
Akinobu Mita                4    2.50%        1       11.11%
Milton D. Miller II         2    1.25%        1       11.11%
Total                     160  100.00%        9      100.00%

module_init(tcpprobe_init);
static __exit void tcpprobe_exit(void)
{
	remove_proc_entry(procname, init_net.proc_net);
	unregister_jprobe(&tcp_jprobe);
	kfree(tcp_probe.log);
}

Contributors

Person                 Tokens     Prop  Commits  Commit Prop
Stephen Hemminger          26   83.87%        3       75.00%
Gao Feng                    5   16.13%        1       25.00%
Total                      31  100.00%        4      100.00%

module_exit(tcpprobe_exit);

Overall Contributors

Person                 Tokens     Prop  Commits  Commit Prop
Stephen Hemminger         991   74.40%        6       17.65%
Daniel Borkmann           157   11.79%        5       14.71%
Eric Dumazet              111    8.33%        5       14.71%
Gao Feng                   15    1.13%        2        5.88%
Tom Quetchenbach           10    0.75%        1        2.94%
Ilya V. Matveychikov        7    0.53%        1        2.94%
Joe Perches                 7    0.53%        1        2.94%
Arnd Bergmann               5    0.38%        1        2.94%
Deepa Dinamani              5    0.38%        1        2.94%
Akinobu Mita                4    0.30%        1        2.94%
David S. Miller             3    0.23%        1        2.94%
Eric W. Biedermann          3    0.23%        1        2.94%
Tejun Heo                   3    0.23%        1        2.94%
Milton D. Miller II         2    0.15%        1        2.94%
Harvey Harrison             2    0.15%        1        2.94%
James Morris                2    0.15%        1        2.94%
Roel Kluin                  2    0.15%        1        2.94%
Vasiliy Kulikov             1    0.08%        1        2.94%
Arjan van de Ven            1    0.08%        1        2.94%
Vijay Subramanian           1    0.08%        1        2.94%
Total                    1332  100.00%       34      100.00%