Contributors: 10
Author                    Tokens  Token Proportion  Commits  Commit Proportion
Jiri Olsa                    300            53.29%       17             35.42%
Namhyung Kim                 121            21.49%        6             12.50%
Kan Liang                     64            11.37%       10             20.83%
Leo Yan                       28             4.97%        5             10.42%
Ian Rogers                    25             4.44%        3              6.25%
Jin Yao                       16             2.84%        2              4.17%
Stéphane Eranian               5             0.89%        1              2.08%
Arnaldo Carvalho de Melo       2             0.36%        2              4.17%
Ingo Molnar                    1             0.18%        1              2.08%
Greg Kroah-Hartman             1             0.18%        1              2.08%
Total                        563                         48


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_MEM_EVENTS_H
#define __PERF_MEM_EVENTS_H

#include <stdbool.h>
#include <linux/types.h>

struct perf_mem_event {
	bool		supported;
	bool		ldlat;
	u32		aux_event;
	const char	*tag;
	const char	*name;
	const char	*event_name;
};

enum {
	PERF_MEM_EVENTS__LOAD,
	PERF_MEM_EVENTS__STORE,
	PERF_MEM_EVENTS__LOAD_STORE,
	PERF_MEM_EVENTS__MAX,
};

struct evsel;
struct mem_info;
struct perf_pmu;

extern unsigned int perf_mem_events__loads_ldlat;
extern struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX];
extern bool perf_mem_record[PERF_MEM_EVENTS__MAX];

int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str);
int perf_pmu__mem_events_init(void);

struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i);
struct perf_pmu *perf_mem_events_find_pmu(void);
int perf_pmu__mem_events_num_mem_pmus(struct perf_pmu *pmu);
bool is_mem_loads_aux_event(struct evsel *leader);
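
/*
 * Usage sketch (illustration only, not part of this header): initialize the
 * mem event tables, look up the sampling PMU, then walk its event slots and
 * count the ones marked supported. Calling the init function before the
 * lookup helpers is an assumption based on the declarations above.
 */
static inline int example_count_supported_mem_events(void)
{
	struct perf_pmu *pmu;
	int i, nr = 0;

	if (perf_pmu__mem_events_init())
		return -1;

	pmu = perf_mem_events_find_pmu();
	if (!pmu)
		return -1;

	for (i = 0; i < PERF_MEM_EVENTS__MAX; i++) {
		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, i);

		if (e && e->supported)
			nr++;
	}

	return nr;
}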

void perf_pmu__mem_events_list(struct perf_pmu *pmu);
int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
				 char **event_name_storage_out);
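
/*
 * Usage sketch (illustration only, not part of this header): enable load and
 * store sampling via perf_mem_record[] and let perf_mem_events__record_args()
 * append the corresponding event arguments to a record argv. That the
 * returned event_name_storage backs strings referenced by rec_argv, and so
 * must stay allocated while rec_argv is in use, is an assumption.
 */
static inline int example_build_mem_record_args(const char **rec_argv, int *argv_nr,
						char **event_name_storage)
{
	perf_mem_record[PERF_MEM_EVENTS__LOAD] = true;
	perf_mem_record[PERF_MEM_EVENTS__STORE] = true;

	return perf_mem_events__record_args(rec_argv, argv_nr, event_name_storage);
}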

int perf_mem__tlb_scnprintf(char *out, size_t sz, const struct mem_info *mem_info);
int perf_mem__lvl_scnprintf(char *out, size_t sz, const struct mem_info *mem_info);
int perf_mem__snp_scnprintf(char *out, size_t sz, const struct mem_info *mem_info);
int perf_mem__lck_scnprintf(char *out, size_t sz, const struct mem_info *mem_info);
int perf_mem__blk_scnprintf(char *out, size_t sz, const struct mem_info *mem_info);

int perf_script__meminfo_scnprintf(char *bf, size_t size, const struct mem_info *mem_info);
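
/*
 * Usage sketch (illustration only, not part of this header): format the
 * decoded fields of one struct mem_info into caller-provided buffers, one
 * string per dimension. The helpers are assumed to behave like scnprintf(),
 * i.e. NUL-terminate and return the number of characters written.
 */
static inline void example_format_mem_info(const struct mem_info *mi,
					   char *lvl, char *snoop, size_t sz)
{
	perf_mem__lvl_scnprintf(lvl, sz, mi);	/* memory hierarchy level */
	perf_mem__snp_scnprintf(snoop, sz, mi);	/* snoop result */
}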

struct c2c_stats {
	u32	nr_entries;

	u32	locks;               /* count of 'lock' transactions */
	u32	store;               /* count of all stores in trace */
	u32	st_uncache;          /* stores to uncacheable address */
	u32	st_noadrs;           /* cacheable store with no address */
	u32	st_l1hit;            /* count of stores that hit L1D */
	u32	st_l1miss;           /* count of stores that miss L1D */
	u32	st_na;               /* count of stores where the memory level is not available */
	u32	load;                /* count of all loads in trace */
	u32	ld_excl;             /* exclusive loads, rmt/lcl DRAM - snp none/miss */
	u32	ld_shared;           /* shared loads, rmt/lcl DRAM - snp hit */
	u32	ld_uncache;          /* loads to uncacheable address */
	u32	ld_io;               /* loads to I/O address */
	u32	ld_miss;             /* count of loads that miss */
	u32	ld_noadrs;           /* cacheable load with no address */
	u32	ld_fbhit;            /* count of loads hitting Fill Buffer */
	u32	ld_l1hit;            /* count of loads that hit L1D */
	u32	ld_l2hit;            /* count of loads that hit L2D */
	u32	ld_llchit;           /* count of loads that hit LLC */
	u32	lcl_hitm;            /* count of loads with local HITM  */
	u32	rmt_hitm;            /* count of loads with remote HITM */
	u32	tot_hitm;            /* count of loads with local and remote HITM */
	u32	lcl_peer;            /* count of loads with local peer cache */
	u32	rmt_peer;            /* count of loads with remote peer cache */
	u32	tot_peer;            /* count of loads with local and remote peer cache */
	u32	rmt_hit;             /* count of loads with remote hit clean */
	u32	lcl_dram;            /* count of loads miss to local DRAM */
	u32	rmt_dram;            /* count of loads miss to remote DRAM */
	u32	blk_data;            /* count of loads blocked by data */
	u32	blk_addr;            /* count of loads blocked by address conflict */
	u32	nomap;               /* count of load/stores with no phys addrs */
	u32	noparse;             /* count of unparsable data sources */
};

struct hist_entry;
int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi);
void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add);
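
/*
 * Usage sketch (illustration only, not part of this header): decode one
 * sample's mem_info into a fresh, zero-initialized c2c_stats and fold it
 * into a running total. Ignoring the return value of c2c_decode_stats()
 * is a simplification for the sketch.
 */
static inline void example_account_c2c(struct c2c_stats *total, struct mem_info *mi)
{
	struct c2c_stats stats = { 0 };	/* all counters start at zero */

	c2c_decode_stats(&stats, mi);
	c2c_add_stats(total, &stats);
}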

enum mem_stat_type {
	PERF_MEM_STAT_OP,
	PERF_MEM_STAT_CACHE,
	PERF_MEM_STAT_MEMORY,
	PERF_MEM_STAT_SNOOP,
	PERF_MEM_STAT_DTLB,
};

#define MEM_STAT_PRINT_LEN  7  /* 1 space + 5 digits + 1 percent sign */

enum mem_stat_op {
	MEM_STAT_OP_LOAD,
	MEM_STAT_OP_STORE,
	MEM_STAT_OP_LDST,
	MEM_STAT_OP_PFETCH,
	MEM_STAT_OP_EXEC,
	MEM_STAT_OP_OTHER,
};

enum mem_stat_cache {
	MEM_STAT_CACHE_L1,
	MEM_STAT_CACHE_L2,
	MEM_STAT_CACHE_L3,
	MEM_STAT_CACHE_L4,
	MEM_STAT_CACHE_L1_BUF,
	MEM_STAT_CACHE_L2_BUF,
	MEM_STAT_CACHE_OTHER,
};

enum mem_stat_memory {
	MEM_STAT_MEMORY_RAM,
	MEM_STAT_MEMORY_MSC,
	MEM_STAT_MEMORY_UNC,
	MEM_STAT_MEMORY_CXL,
	MEM_STAT_MEMORY_IO,
	MEM_STAT_MEMORY_PMEM,
	MEM_STAT_MEMORY_OTHER,
};

enum mem_stat_snoop {
	MEM_STAT_SNOOP_HIT,
	MEM_STAT_SNOOP_HITM,
	MEM_STAT_SNOOP_MISS,
	MEM_STAT_SNOOP_OTHER,
};

enum mem_stat_dtlb {
	MEM_STAT_DTLB_L1_HIT,
	MEM_STAT_DTLB_L2_HIT,
	MEM_STAT_DTLB_ANY_HIT,
	MEM_STAT_DTLB_MISS,
	MEM_STAT_DTLB_OTHER,
};

int mem_stat_index(const enum mem_stat_type mst, const u64 data_src);
const char *mem_stat_name(const enum mem_stat_type mst, const int idx);
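
/*
 * Usage sketch (illustration only, not part of this header): bucket a raw
 * PERF_SAMPLE_DATA_SRC value for one stat dimension and fetch the bucket's
 * display name. Treating a negative return from mem_stat_index() as "does
 * not map to any bucket" is an assumption; "unknown" is a placeholder.
 */
static inline const char *example_op_bucket_name(u64 data_src)
{
	int idx = mem_stat_index(PERF_MEM_STAT_OP, data_src);

	return idx < 0 ? "unknown" : mem_stat_name(PERF_MEM_STAT_OP, idx);
}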

#endif /* __PERF_MEM_EVENTS_H */