OXIESEC PANEL
- Current Dir:
/
/
usr
/
src
/
linux-headers-4.15.0-213
/
include
/
trace
/
events
Server IP: 139.59.38.164
Upload:
Create Dir:
Name
Size
Modified
Perms
📁
..
-
05/09/2024 07:14:16 AM
rwxr-xr-x
📄
9p.h
5.1 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
afs.h
13.13 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
alarmtimer.h
2.04 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
asoc.h
5.17 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
bcache.h
10.9 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
block.h
17.18 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
bpf.h
8.23 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
bridge.h
3.45 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
btrfs.h
46.21 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
cgroup.h
3.21 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
clk.h
3.69 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
cma.h
1.31 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
compaction.h
7.76 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
context_tracking.h
1.15 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
cpuhp.h
1.83 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
devlink.h
1.76 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
dma_fence.h
1.66 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
ext4.h
64.44 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
f2fs.h
36.38 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
fib.h
2.45 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
fib6.h
1.82 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
filelock.h
6.27 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
filemap.h
2.74 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
fs.h
946 bytes
06/16/2023 05:32:39 PM
rw-r--r--
📄
fs_dax.h
8.26 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
fsi.h
2.81 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
fsi_master_gpio.h
1.54 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
gpio.h
1.13 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
host1x.h
5.77 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
hswadsp.h
8.37 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
huge_memory.h
4.33 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
i2c.h
4.15 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
intel-sst.h
2.51 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
intel_ish.h
556 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
iommu.h
3.03 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
ipi.h
1.88 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
irq.h
3.8 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
irq_matrix.h
4.94 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
jbd2.h
9.79 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
kmem.h
7.39 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
kvm.h
9.03 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
libata.h
11.57 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
lock.h
1.7 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
mce.h
1.85 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
mdio.h
1.02 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
migrate.h
2.42 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
mmc.h
7.24 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
mmflags.h
8.29 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
module.h
2.78 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
napi.h
1013 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
net.h
6.56 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
nilfs2.h
5.2 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
nmi.h
780 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
oom.h
3.88 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
page_isolation.h
943 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
page_ref.h
2.99 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
pagemap.h
2.14 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
percpu.h
2.61 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
power.h
11.35 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
power_cpu_migrate.h
1.59 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
preemptirq.h
1.95 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
printk.h
786 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
qdisc.h
1.37 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
rcu.h
24.64 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
regulator.h
2.4 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
rpm.h
2.34 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
rxrpc.h
33.79 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
sched.h
14.12 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
scsi.h
12.32 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
signal.h
3.43 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
skb.h
1.38 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
smbus.h
7.51 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
sock.h
1.72 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
spi.h
3.31 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
spmi.h
3.24 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
sunrpc.h
18.31 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
sunvnet.h
3.06 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
swiotlb.h
1.22 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
syscalls.h
1.48 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
target.h
7.52 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
task.h
1.49 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
tcp.h
6.57 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
thermal.h
5.58 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
thermal_power_allocator.h
3.22 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
thp.h
2.09 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
timer.h
9.58 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
tlb.h
1.41 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
udp.h
632 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
ufs.h
6.85 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
v4l2.h
8.81 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
vb2.h
1.73 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
vmscan.h
12.47 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
vsock_virtio_transport_common.h
3.49 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
wbt.h
3.83 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
workqueue.h
2.7 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
writeback.h
19.81 KB
06/16/2023 05:32:39 PM
rw-r--r--
📄
xdp.h
5.76 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
xen.h
14.66 KB
06/16/2023 05:32:39 PM
rw-r--r--
Editing: bcache.h
Close
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Tracepoint definitions for bcache (block-layer cache).
 *
 * This header is deliberately read multiple times by the tracepoint
 * machinery (see the TRACE_HEADER_MULTI_READ guard below).  The event
 * layouts are exported to userspace through the trace-event "format"
 * files, so field names, types and order are effectively ABI — do not
 * reorder or rename them.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

/*
 * Event class for I/O requests submitted to a bcache device: records
 * the bio's device/sector/size/op flags plus the original (backing)
 * device major:minor and sector.
 */
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(unsigned int,	orig_major	)
		__field(unsigned int,	orig_minor	)
		__field(sector_t,	sector		)
		/*
		 * NOTE(review): declared dev_t although it holds a sector
		 * value; kept as-is because the field layout is trace ABI.
		 */
		__field(dev_t,		orig_sector	)
		__field(unsigned int,	nr_sector	)
		__array(char,		rwbs,	6	)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
		__entry->sector		= bio->bi_iter.bi_sector;
		/*
		 * 16-sector adjustment — presumably the bcache data offset
		 * before the backing payload; TODO confirm against
		 * drivers/md/bcache.
		 */
		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
		/* bi_size is in bytes; >> 9 converts to 512-byte sectors */
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

/*
 * Event class keyed on a bcache bkey: logs the key's inode, offset,
 * length (size) and dirty bit.
 */
DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32,	size	)
		__field(u32,	inode	)
		__field(u64,	offset	)
		__field(bool,	dirty	)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

/* Event class identifying a btree node by the cache bucket backing it. */
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,	bucket	)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

/* request.c */

/* Fired when a request to a bcache device starts / completes. */
DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

/*
 * Event class for a bare bio: device, start sector, sector count and
 * op flags (rwbs string).
 */
DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(sector_t,	sector		)
		__field(unsigned int,	nr_sector	)
		__array(char,		rwbs,	6	)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

/* Fired when a bio bypasses the cache (sequential or congested path). */
DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* A read through the cache, annotated with hit/bypass outcome. */
TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(sector_t,	sector		)
		__field(unsigned int,	nr_sector	)
		__array(char,		rwbs,	6	)
		__field(bool,		cache_hit	)
		__field(bool,		bypass		)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->cache_hit	= hit;
		__entry->bypass		= bypass;
	),

	TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

/*
 * A write into the cache set, annotated with the set UUID, target inode
 * and the writeback/bypass decision.
 */
TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		 bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char,		uuid,	16	)
		__field(u64,		inode		)
		__field(sector_t,	sector		)
		__field(unsigned int,	nr_sector	)
		__array(char,		rwbs,	6	)
		__field(bool,		writeback	)
		__field(bool,		bypass		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
		__entry->inode		= inode;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->writeback	= writeback;
		__entry->bypass		= bypass;
	),

	/*
	 * NOTE(review): the format labels the first flag "hit" but the
	 * argument printed is the writeback flag; the output string is
	 * trace ABI, so it is left unchanged.
	 */
	TP_printk("%pU inode %llu %s %llu + %u hit %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

/* A read that is being retried after a failure. */
DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* A key inserted into the cache. */
DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */

/* Event class identifying a cache set by its UUID. */
DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,	uuid,	16	)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

/* A key replayed from the journal at mount/recovery time. */
DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal has no free buckets / current entry is full. */
DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

/* A journal write bio being issued. */
DEFINE_EVENT(bcache_bio, bcache_journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Btree */

/* Btree node cache had to cannibalize a node to make room. */
DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

/* A btree node read from disk. */
DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

/*
 * A btree node write.  block/keys are recorded in the entry but only
 * the bucket appears in the format string (ABI — left unchanged).
 */
TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket	)
		__field(unsigned,	block	)
		__field(unsigned,	keys	)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block	= b->written;
		__entry->keys	= b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu", __entry->bucket)
);

/* Btree node allocation succeeded / failed / node freed. */
DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);
/* Garbage collection coalesced several btree nodes into one. */
TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned,	nodes	)
	),

	TP_fast_assign(
		__entry->nodes	= nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

/* Garbage collection pass started / finished on a cache set. */
DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

/* GC moving (copying) a key; _collision fires when the move raced. */
DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/*
 * A key inserted into a btree node: records the node's bucket and
 * level, the key's inode/offset/size/dirty bits, plus the caller's
 * op and status codes (opaque integers supplied by the insert path).
 */
TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64,	btree_node	)
		__field(u32,	btree_level	)
		__field(u32,	inode		)
		__field(u64,	offset		)
		__field(u32,	size		)
		__field(u8,	dirty		)
		__field(u8,	op		)
		__field(u8,	status		)
	),

	TP_fast_assign(
		__entry->btree_node	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level	= b->level;
		__entry->inode		= KEY_INODE(k);
		__entry->offset		= KEY_OFFSET(k);
		__entry->size		= KEY_SIZE(k);
		__entry->dirty		= KEY_DIRTY(k);
		__entry->op		= op;
		__entry->status		= status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);

/* Event class for btree node split/compact: bucket plus key count. */
DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t,		bucket	)
		__field(unsigned,	keys	)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys	= keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

/* A btree node became the new root. */
DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);
/*
 * Result of a key scan: how many keys were found in the inclusive
 * (start_inode:start_offset, end_inode:end_offset) range.
 */
TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32,	nr_found	)
		__field(__u32,	start_inode	)
		__field(__u64,	start_offset	)
		__field(__u32,	end_inode	)
		__field(__u64,	end_offset	)
	),

	TP_fast_assign(
		__entry->nr_found	= nr_found;
		__entry->start_inode	= start_inode;
		__entry->start_offset	= start_offset;
		__entry->end_inode	= end_inode;
		__entry->end_offset	= end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

/* Allocator */

/*
 * A bucket was invalidated for reuse: records the device, the bucket's
 * starting sector and how many sectors of cached data it held.
 */
TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned,	sectors	)
		__field(dev_t,		dev	)
		__field(__u64,		offset	)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		/* bucket index -> sector offset via the set's bucket size */
		__entry->offset		= bucket << ca->set->bucket_bits;
		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

/* A bucket was allocated: device plus the bucket's starting sector. */
TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t,	dev	)
		__field(__u64,	offset	)
	),

	TP_fast_assign(
		__entry->dev	= ca->bdev->bd_dev;
		__entry->offset	= bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

/*
 * Bucket allocation failed: snapshots the free / free_inc fifo depths
 * for the requested reserve and the number of blocked prio writes.
 */
TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(unsigned,	free		)
		__field(unsigned,	free_inc	)
		__field(unsigned,	blocked		)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->free		= fifo_used(&ca->free[reserve]);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);

/* Background writeback */

/* A dirty key being written back; _collision fires when the write raced. */
DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>