author    root <root@artemis.panaceas.org>  2015-12-25 04:40:36 +0000
committer root <root@artemis.panaceas.org>  2015-12-25 04:40:36 +0000
commit    849369d6c66d3054688672f97d31fceb8e8230fb (patch)
tree      6135abc790ca67dedbe07c39806591e70eda81ce /include/trace
initial_commit
Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/define_trace.h                123
-rw-r--r--  include/trace/events/asoc.h                 260
-rw-r--r--  include/trace/events/block.h                569
-rw-r--r--  include/trace/events/btrfs.h                667
-rw-r--r--  include/trace/events/compaction.h            74
-rw-r--r--  include/trace/events/cpufreq_interactive.h   87
-rw-r--r--  include/trace/events/ext4.h                1526
-rw-r--r--  include/trace/events/gfpflags.h              41
-rw-r--r--  include/trace/events/gpio.h                  56
-rw-r--r--  include/trace/events/irq.h                  150
-rw-r--r--  include/trace/events/jbd2.h                 233
-rw-r--r--  include/trace/events/kmem.h                 308
-rw-r--r--  include/trace/events/kvm.h                  312
-rw-r--r--  include/trace/events/lock.h                  86
-rw-r--r--  include/trace/events/mce.h                   69
-rw-r--r--  include/trace/events/module.h               131
-rw-r--r--  include/trace/events/napi.h                  38
-rw-r--r--  include/trace/events/net.h                   84
-rw-r--r--  include/trace/events/power.h                240
-rw-r--r--  include/trace/events/regulator.h            141
-rw-r--r--  include/trace/events/sched.h                397
-rw-r--r--  include/trace/events/scsi.h                 365
-rw-r--r--  include/trace/events/signal.h               166
-rw-r--r--  include/trace/events/skb.h                   75
-rw-r--r--  include/trace/events/syscalls.h              75
-rw-r--r--  include/trace/events/timer.h                329
-rw-r--r--  include/trace/events/vmscan.h               400
-rw-r--r--  include/trace/events/workqueue.h            121
-rw-r--r--  include/trace/events/writeback.h            196
-rw-r--r--  include/trace/ftrace.h                      775
-rw-r--r--  include/trace/syscall.h                      57
31 files changed, 8151 insertions, 0 deletions
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
new file mode 100644
index 00000000..da39b226
--- /dev/null
+++ b/include/trace/define_trace.h
@@ -0,0 +1,123 @@
+/*
+ * Trace files that want to automate creation of all tracepoints defined
+ * in their file should include this file. The following are macros that the
+ * trace file may define:
+ *
+ * TRACE_SYSTEM defines the system the tracepoint is for
+ *
+ * TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h
+ * This macro may be defined to tell define_trace.h what file to include.
+ * Note, leave off the ".h".
+ *
+ * TRACE_INCLUDE_PATH if the path is something other than the core kernel
+ * include/trace, then this macro can define the path to use. Note, the path
+ * is relative to define_trace.h, not the file including it. Out-of-tree
+ * modules must use full path names.
+ */
+
+#ifdef CREATE_TRACE_POINTS
+
+/* Prevent recursion */
+#undef CREATE_TRACE_POINTS
+
+#include <linux/stringify.h>
+/*
+ * module.h includes tracepoints, and because ftrace.h
+ * pulls in module.h:
+ * trace/ftrace.h -> linux/ftrace_event.h -> linux/perf_event.h ->
+ * linux/ftrace.h -> linux/module.h
+ * we must include module.h here before we play with any of
+ * the TRACE_EVENT() macros, otherwise the tracepoints included
+ * by module.h may break the build.
+ */
+#include <linux/module.h>
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
+ DEFINE_TRACE(name)
+
+#undef TRACE_EVENT_CONDITION
+#define TRACE_EVENT_CONDITION(name, proto, args, cond, tstruct, assign, print) \
+ TRACE_EVENT(name, \
+ PARAMS(proto), \
+ PARAMS(args), \
+ PARAMS(tstruct), \
+ PARAMS(assign), \
+ PARAMS(print))
+
+#undef TRACE_EVENT_FN
+#define TRACE_EVENT_FN(name, proto, args, tstruct, \
+ assign, print, reg, unreg) \
+ DEFINE_TRACE_FN(name, reg, unreg)
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args) \
+ DEFINE_TRACE(name)
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
+ DEFINE_TRACE(name)
+
+#undef DEFINE_EVENT_CONDITION
+#define DEFINE_EVENT_CONDITION(template, name, proto, args, cond) \
+ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#undef DECLARE_TRACE
+#define DECLARE_TRACE(name, proto, args) \
+ DEFINE_TRACE(name)
+
+#undef TRACE_INCLUDE
+#undef __TRACE_INCLUDE
+
+#ifndef TRACE_INCLUDE_FILE
+# define TRACE_INCLUDE_FILE TRACE_SYSTEM
+# define UNDEF_TRACE_INCLUDE_FILE
+#endif
+
+#ifndef TRACE_INCLUDE_PATH
+# define __TRACE_INCLUDE(system) <trace/events/system.h>
+# define UNDEF_TRACE_INCLUDE_PATH
+#else
+# define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h)
+#endif
+
+# define TRACE_INCLUDE(system) __TRACE_INCLUDE(system)
+
+/* Let the trace headers be reread */
+#define TRACE_HEADER_MULTI_READ
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Make all open coded DECLARE_TRACE nops */
+#undef DECLARE_TRACE
+#define DECLARE_TRACE(name, proto, args)
+
+#ifdef CONFIG_EVENT_TRACING
+#include <trace/ftrace.h>
+#endif
+
+#undef TRACE_EVENT
+#undef TRACE_EVENT_FN
+#undef TRACE_EVENT_CONDITION
+#undef DECLARE_EVENT_CLASS
+#undef DEFINE_EVENT
+#undef DEFINE_EVENT_PRINT
+#undef DEFINE_EVENT_CONDITION
+#undef TRACE_HEADER_MULTI_READ
+#undef DECLARE_TRACE
+
+/* Only undef what we defined in this file */
+#ifdef UNDEF_TRACE_INCLUDE_FILE
+# undef TRACE_INCLUDE_FILE
+# undef UNDEF_TRACE_INCLUDE_FILE
+#endif
+
+#ifdef UNDEF_TRACE_INCLUDE_PATH
+# undef TRACE_INCLUDE_PATH
+# undef UNDEF_TRACE_INCLUDE_PATH
+#endif
+
+/* We may be processing more files */
+#define CREATE_TRACE_POINTS
+
+#endif /* CREATE_TRACE_POINTS */
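
A minimal sketch of the convention this header expects (the "sample" names are
hypothetical, not files in this tree): a trace header defines its events between
the multi-read guards and pulls in define_trace.h outside them, and exactly one
.c file defines CREATE_TRACE_POINTS before including the header, which
instantiates the tracepoints once.

	/* include/trace/events/sample.h -- hypothetical trace header */
	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM sample

	#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_SAMPLE_H

	#include <linux/tracepoint.h>

	TRACE_EVENT(sample_event,
		TP_PROTO(int value),
		TP_ARGS(value),
		TP_STRUCT__entry(
			__field( int, value )
		),
		TP_fast_assign(
			__entry->value = value;
		),
		TP_printk("value=%d", __entry->value)
	);

	#endif /* _TRACE_SAMPLE_H */

	/* This part must be outside protection */
	#include <trace/define_trace.h>

	/* sample.c -- exactly one compilation unit instantiates the points */
	#define CREATE_TRACE_POINTS
	#include <trace/events/sample.h>
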
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
new file mode 100644
index 00000000..ae973d2e
--- /dev/null
+++ b/include/trace/events/asoc.h
@@ -0,0 +1,260 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM asoc
+
+#if !defined(_TRACE_ASOC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ASOC_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+struct snd_soc_jack;
+struct snd_soc_codec;
+struct snd_soc_card;
+struct snd_soc_dapm_widget;
+
+/*
+ * Log register events
+ */
+DECLARE_EVENT_CLASS(snd_soc_reg,
+
+ TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(codec, reg, val),
+
+ TP_STRUCT__entry(
+ __string( name, codec->name )
+ __field( int, id )
+ __field( unsigned int, reg )
+ __field( unsigned int, val )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, codec->name);
+ __entry->id = codec->id;
+ __entry->reg = reg;
+ __entry->val = val;
+ ),
+
+ TP_printk("codec=%s.%d reg=%x val=%x", __get_str(name),
+ (int)__entry->id, (unsigned int)__entry->reg,
+ (unsigned int)__entry->val)
+);
+
+DEFINE_EVENT(snd_soc_reg, snd_soc_reg_write,
+
+ TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(codec, reg, val)
+
+);
+
+DEFINE_EVENT(snd_soc_reg, snd_soc_reg_read,
+
+ TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(codec, reg, val)
+
+);
+
+DECLARE_EVENT_CLASS(snd_soc_card,
+
+ TP_PROTO(struct snd_soc_card *card, int val),
+
+ TP_ARGS(card, val),
+
+ TP_STRUCT__entry(
+ __string( name, card->name )
+ __field( int, val )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, card->name);
+ __entry->val = val;
+ ),
+
+ TP_printk("card=%s val=%d", __get_str(name), (int)__entry->val)
+);
+
+DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_start,
+
+ TP_PROTO(struct snd_soc_card *card, int val),
+
+ TP_ARGS(card, val)
+
+);
+
+DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_done,
+
+ TP_PROTO(struct snd_soc_card *card, int val),
+
+ TP_ARGS(card, val)
+
+);
+
+DECLARE_EVENT_CLASS(snd_soc_dapm_basic,
+
+ TP_PROTO(struct snd_soc_card *card),
+
+ TP_ARGS(card),
+
+ TP_STRUCT__entry(
+ __string( name, card->name )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, card->name);
+ ),
+
+ TP_printk("card=%s", __get_str(name))
+);
+
+DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_start,
+
+ TP_PROTO(struct snd_soc_card *card),
+
+ TP_ARGS(card)
+
+);
+
+DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_done,
+
+ TP_PROTO(struct snd_soc_card *card),
+
+ TP_ARGS(card)
+
+);
+
+DECLARE_EVENT_CLASS(snd_soc_dapm_widget,
+
+ TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+ TP_ARGS(w, val),
+
+ TP_STRUCT__entry(
+ __string( name, w->name )
+ __field( int, val )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, w->name);
+ __entry->val = val;
+ ),
+
+ TP_printk("widget=%s val=%d", __get_str(name),
+ (int)__entry->val)
+);
+
+DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_power,
+
+ TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+ TP_ARGS(w, val)
+
+);
+
+DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_start,
+
+ TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+ TP_ARGS(w, val)
+
+);
+
+DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_done,
+
+ TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+ TP_ARGS(w, val)
+
+);
+
+TRACE_EVENT(snd_soc_jack_irq,
+
+ TP_PROTO(const char *name),
+
+ TP_ARGS(name),
+
+ TP_STRUCT__entry(
+ __string( name, name )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ ),
+
+ TP_printk("%s", __get_str(name))
+);
+
+TRACE_EVENT(snd_soc_jack_report,
+
+ TP_PROTO(struct snd_soc_jack *jack, int mask, int val),
+
+ TP_ARGS(jack, mask, val),
+
+ TP_STRUCT__entry(
+ __string( name, jack->jack->name )
+ __field( int, mask )
+ __field( int, val )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, jack->jack->name);
+ __entry->mask = mask;
+ __entry->val = val;
+ ),
+
+ TP_printk("jack=%s %x/%x", __get_str(name), (int)__entry->val,
+ (int)__entry->mask)
+);
+
+TRACE_EVENT(snd_soc_jack_notify,
+
+ TP_PROTO(struct snd_soc_jack *jack, int val),
+
+ TP_ARGS(jack, val),
+
+ TP_STRUCT__entry(
+ __string( name, jack->jack->name )
+ __field( int, val )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, jack->jack->name);
+ __entry->val = val;
+ ),
+
+ TP_printk("jack=%s %x", __get_str(name), (int)__entry->val)
+);
+
+TRACE_EVENT(snd_soc_cache_sync,
+
+ TP_PROTO(struct snd_soc_codec *codec, const char *type,
+ const char *status),
+
+ TP_ARGS(codec, type, status),
+
+ TP_STRUCT__entry(
+ __string( name, codec->name )
+ __string( status, status )
+ __string( type, type )
+ __field( int, id )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, codec->name);
+ __assign_str(status, status);
+ __assign_str(type, type);
+ __entry->id = codec->id;
+ ),
+
+ TP_printk("codec=%s.%d type=%s status=%s", __get_str(name),
+ (int)__entry->id, __get_str(type), __get_str(status))
+);
+
+#endif /* _TRACE_ASOC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
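
Every TRACE_EVENT()/DEFINE_EVENT() named x in a header like this also generates
a static inline trace_x() with the TP_PROTO() signature, which compiles down to
a near-no-op while the event is disabled. A hypothetical call site (sketch only;
the actual register I/O is elided):

	static void example_codec_write(struct snd_soc_codec *codec,
					unsigned int reg, unsigned int val)
	{
		trace_snd_soc_reg_write(codec, reg, val); /* fires snd_soc_reg_write */
		/* ... perform the actual bus write here ... */
	}
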
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
new file mode 100644
index 00000000..bf366547
--- /dev/null
+++ b/include/trace/events/block.h
@@ -0,0 +1,569 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM block
+
+#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BLOCK_H
+
+#include <linux/blktrace_api.h>
+#include <linux/blkdev.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(block_rq_with_error,
+
+ TP_PROTO(struct request_queue *q, struct request *rq),
+
+ TP_ARGS(q, rq),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __field( int, errors )
+ __array( char, rwbs, 6 )
+ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ 0 : blk_rq_pos(rq);
+ __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ 0 : blk_rq_sectors(rq);
+ __entry->errors = rq->errors;
+
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+ blk_dump_cmd(__get_str(cmd), rq);
+ ),
+
+ TP_printk("%d,%d %s (%s) %llu + %u [%d]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs, __get_str(cmd),
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->errors)
+);
+
+/**
+ * block_rq_abort - abort block operation request
+ * @q: queue containing the block operation request
+ * @rq: block IO operation request
+ *
+ * Called immediately after pending block IO operation request @rq in
+ * queue @q is aborted. The fields in the operation request @rq
+ * can be examined to determine which device and sectors the pending
+ * operation would access.
+ */
+DEFINE_EVENT(block_rq_with_error, block_rq_abort,
+
+ TP_PROTO(struct request_queue *q, struct request *rq),
+
+ TP_ARGS(q, rq)
+);
+
+/**
+ * block_rq_requeue - place block IO request back on a queue
+ * @q: queue holding operation
+ * @rq: block IO operation request
+ *
+ * The block operation request @rq is being placed back into queue
+ * @q. For some reason the request was not completed and needs to be
+ * put back in the queue.
+ */
+DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
+
+ TP_PROTO(struct request_queue *q, struct request *rq),
+
+ TP_ARGS(q, rq)
+);
+
+/**
+ * block_rq_complete - block IO operation completed by device driver
+ * @q: queue containing the block operation request
+ * @rq: block operations request
+ *
+ * The block_rq_complete tracepoint event indicates that some portion
+ * of the operation request has been completed by the device driver. If
+ * @rq->bio is %NULL, then there is absolutely no additional work to
+ * do for the request. If @rq->bio is non-NULL, then there is
+ * additional work required to complete the request.
+ */
+DEFINE_EVENT(block_rq_with_error, block_rq_complete,
+
+ TP_PROTO(struct request_queue *q, struct request *rq),
+
+ TP_ARGS(q, rq)
+);
+
+DECLARE_EVENT_CLASS(block_rq,
+
+ TP_PROTO(struct request_queue *q, struct request *rq),
+
+ TP_ARGS(q, rq),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __field( unsigned int, bytes )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ 0 : blk_rq_pos(rq);
+ __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ 0 : blk_rq_sectors(rq);
+ __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+ blk_rq_bytes(rq) : 0;
+
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+ blk_dump_cmd(__get_str(cmd), rq);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs, __entry->bytes, __get_str(cmd),
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
+);
+
+/**
+ * block_rq_insert - insert block operation request into queue
+ * @q: target queue
+ * @rq: block IO operation request
+ *
+ * Called immediately before block operation request @rq is inserted
+ * into queue @q. The fields in the operation request @rq struct can
+ * be examined to determine which device and sectors the pending
+ * operation would access.
+ */
+DEFINE_EVENT(block_rq, block_rq_insert,
+
+ TP_PROTO(struct request_queue *q, struct request *rq),
+
+ TP_ARGS(q, rq)
+);
+
+/**
+ * block_rq_issue - issue pending block IO request operation to device driver
+ * @q: queue holding operation
+ * @rq: block IO operation request
+ *
+ * Called when block operation request @rq from queue @q is sent to a
+ * device driver for processing.
+ */
+DEFINE_EVENT(block_rq, block_rq_issue,
+
+ TP_PROTO(struct request_queue *q, struct request *rq),
+
+ TP_ARGS(q, rq)
+);
+
+/**
+ * block_bio_bounce - used bounce buffer when processing block operation
+ * @q: queue holding the block operation
+ * @bio: block operation
+ *
+ * A bounce buffer was used to handle the block operation @bio in @q.
+ * This occurs when hardware limitations prevent a direct transfer of
+ * data between the @bio data memory area and the IO device. Use of a
+ * bounce buffer requires extra copying of data and decreases
+ * performance.
+ */
+TRACE_EVENT(block_bio_bounce,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio),
+
+ TP_ARGS(q, bio),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev ?
+ bio->bi_bdev->bd_dev : 0;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
+);
+
+/**
+ * block_bio_complete - completed all work on the block operation
+ * @q: queue holding the block operation
+ * @bio: block operation completed
+ * @error: io error value
+ *
+ * This tracepoint indicates there is no further work to do on this
+ * block IO operation @bio.
+ */
+TRACE_EVENT(block_bio_complete,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio, int error),
+
+ TP_ARGS(q, bio, error),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned, nr_sector )
+ __field( int, error )
+ __array( char, rwbs, 6 )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ __entry->error = error;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%d]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->error)
+);
+
+DECLARE_EVENT_CLASS(block_bio,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio),
+
+ TP_ARGS(q, bio),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
+);
+
+/**
+ * block_bio_backmerge - merging block operation to the end of an existing operation
+ * @q: queue holding operation
+ * @bio: new block operation to merge
+ *
+ * Merging block request @bio to the end of an existing block request
+ * in queue @q.
+ */
+DEFINE_EVENT(block_bio, block_bio_backmerge,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio),
+
+ TP_ARGS(q, bio)
+);
+
+/**
+ * block_bio_frontmerge - merging block operation to the beginning of an existing operation
+ * @q: queue holding operation
+ * @bio: new block operation to merge
+ *
+ * Merging block IO operation @bio to the beginning of an existing block
+ * operation in queue @q.
+ */
+DEFINE_EVENT(block_bio, block_bio_frontmerge,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio),
+
+ TP_ARGS(q, bio)
+);
+
+/**
+ * block_bio_queue - putting new block IO operation in queue
+ * @q: queue holding operation
+ * @bio: new block operation
+ *
+ * About to place the block IO operation @bio into queue @q.
+ */
+DEFINE_EVENT(block_bio, block_bio_queue,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio),
+
+ TP_ARGS(q, bio)
+);
+
+DECLARE_EVENT_CLASS(block_get_rq,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
+
+ TP_ARGS(q, bio, rw),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
+ __entry->sector = bio ? bio->bi_sector : 0;
+ __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
+ blk_fill_rwbs(__entry->rwbs,
+ bio ? bio->bi_rw : 0, __entry->nr_sector);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
+);
+
+/**
+ * block_getrq - get a free request entry in queue for block IO operations
+ * @q: queue for operations
+ * @bio: pending block IO operation
+ * @rw: low bit indicates a read (%0) or a write (%1)
+ *
+ * A request struct for queue @q has been allocated to handle the
+ * block IO operation @bio.
+ */
+DEFINE_EVENT(block_get_rq, block_getrq,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
+
+ TP_ARGS(q, bio, rw)
+);
+
+/**
+ * block_sleeprq - waiting to get a free request entry in queue for block IO operation
+ * @q: queue for operation
+ * @bio: pending block IO operation
+ * @rw: low bit indicates a read (%0) or a write (%1)
+ *
+ * In the case where a request struct cannot be provided for queue @q
+ * the process needs to wait for a request struct to become
+ * available. This tracepoint event is generated each time the
+ * process goes to sleep waiting for a request struct to become available.
+ */
+DEFINE_EVENT(block_get_rq, block_sleeprq,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
+
+ TP_ARGS(q, bio, rw)
+);
+
+/**
+ * block_plug - keep operation requests in request queue
+ * @q: request queue to plug
+ *
+ * Plug the request queue @q. Do not allow block operation requests
+ * to be sent to the device driver. Instead, accumulate requests in
+ * the queue to improve throughput performance of the block device.
+ */
+TRACE_EVENT(block_plug,
+
+ TP_PROTO(struct request_queue *q),
+
+ TP_ARGS(q),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("[%s]", __entry->comm)
+);
+
+DECLARE_EVENT_CLASS(block_unplug,
+
+ TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
+
+ TP_ARGS(q, depth, explicit),
+
+ TP_STRUCT__entry(
+ __field( int, nr_rq )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->nr_rq = depth;
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
+);
+
+/**
+ * block_unplug - release of operation requests in request queue
+ * @q: request queue to unplug
+ * @depth: number of requests just added to the queue
+ * @explicit: whether this was an explicit unplug, or one from schedule()
+ *
+ * Unplug request queue @q because the device driver is scheduled to work
+ * on elements in the request queue.
+ */
+DEFINE_EVENT(block_unplug, block_unplug,
+
+ TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
+
+ TP_ARGS(q, depth, explicit)
+);
+
+/**
+ * block_split - split a single bio struct into two bio structs
+ * @q: queue containing the bio
+ * @bio: block operation being split
+ * @new_sector: The starting sector for the new bio
+ *
+ * The bio request @bio in request queue @q needs to be split into two
+ * bio requests. The newly created @bio request starts at
+ * @new_sector. This split may be required due to hardware limitations
+ * such as an operation crossing device boundaries in a RAID system.
+ */
+TRACE_EVENT(block_split,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio,
+ unsigned int new_sector),
+
+ TP_ARGS(q, bio, new_sector),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( sector_t, new_sector )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->new_sector = new_sector;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu / %llu [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ (unsigned long long)__entry->new_sector,
+ __entry->comm)
+);
+
+/**
+ * block_bio_remap - map request for a logical device to the raw device
+ * @q: queue holding the operation
+ * @bio: revised operation
+ * @dev: device for the operation
+ * @from: original sector for the operation
+ *
+ * An operation for a logical device has been mapped to the
+ * raw block device.
+ */
+TRACE_EVENT(block_bio_remap,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
+ sector_t from),
+
+ TP_ARGS(q, bio, dev, from),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __field( dev_t, old_dev )
+ __field( sector_t, old_sector )
+ __array( char, rwbs, 6 )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ __entry->old_dev = dev;
+ __entry->old_sector = from;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector,
+ MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
+ (unsigned long long)__entry->old_sector)
+);
+
+/**
+ * block_rq_remap - map request for a block operation request
+ * @q: queue holding the operation
+ * @rq: block IO operation request
+ * @dev: device for the operation
+ * @from: original sector for the operation
+ *
+ * The block operation request @rq in @q has been remapped. The block
+ * operation request @rq holds the current information and @from holds
+ * the original sector.
+ */
+TRACE_EVENT(block_rq_remap,
+
+ TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
+ sector_t from),
+
+ TP_ARGS(q, rq, dev, from),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __field( dev_t, old_dev )
+ __field( sector_t, old_sector )
+ __array( char, rwbs, 6 )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = disk_devt(rq->rq_disk);
+ __entry->sector = blk_rq_pos(rq);
+ __entry->nr_sector = blk_rq_sectors(rq);
+ __entry->old_dev = dev;
+ __entry->old_sector = from;
+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+ ),
+
+ TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector,
+ MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
+ (unsigned long long)__entry->old_sector)
+);
+
+#endif /* _TRACE_BLOCK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
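
DECLARE_EVENT_CLASS() factors the record layout, assignment, and format string
out of the individual events, so an additional event with the same shape costs
only a name, prototype, and argument list. A sketch with a hypothetical event
name reusing the block_bio class above:

	DEFINE_EVENT(block_bio, block_bio_example, /* hypothetical event */

		TP_PROTO(struct request_queue *q, struct bio *bio),

		TP_ARGS(q, bio)
	);
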
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
new file mode 100644
index 00000000..4114129f
--- /dev/null
+++ b/include/trace/events/btrfs.h
@@ -0,0 +1,667 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM btrfs
+
+#if !defined(_TRACE_BTRFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BTRFS_H
+
+#include <linux/writeback.h>
+#include <linux/tracepoint.h>
+
+struct btrfs_root;
+struct btrfs_fs_info;
+struct btrfs_inode;
+struct extent_map;
+struct btrfs_ordered_extent;
+struct btrfs_delayed_ref_node;
+struct btrfs_delayed_tree_ref;
+struct btrfs_delayed_data_ref;
+struct btrfs_delayed_ref_head;
+struct map_lookup;
+struct extent_buffer;
+
+#define show_ref_type(type) \
+ __print_symbolic(type, \
+ { BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \
+ { BTRFS_EXTENT_DATA_REF_KEY, "EXTENT_DATA_REF" }, \
+ { BTRFS_EXTENT_REF_V0_KEY, "EXTENT_REF_V0" }, \
+ { BTRFS_SHARED_BLOCK_REF_KEY, "SHARED_BLOCK_REF" }, \
+ { BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" })
+
+#define __show_root_type(obj) \
+ __print_symbolic_u64(obj, \
+ { BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" }, \
+ { BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" }, \
+ { BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" }, \
+ { BTRFS_DEV_TREE_OBJECTID, "DEV_TREE" }, \
+ { BTRFS_FS_TREE_OBJECTID, "FS_TREE" }, \
+ { BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" }, \
+ { BTRFS_CSUM_TREE_OBJECTID, "CSUM_TREE" }, \
+ { BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \
+ { BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
+ { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
+
+#define show_root_type(obj) \
+ obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) || \
+ (obj <= BTRFS_CSUM_TREE_OBJECTID)) ? __show_root_type(obj) : "-"
+
+TRACE_EVENT(btrfs_transaction_commit,
+
+ TP_PROTO(struct btrfs_root *root),
+
+ TP_ARGS(root),
+
+ TP_STRUCT__entry(
+ __field( u64, generation )
+ __field( u64, root_objectid )
+ ),
+
+ TP_fast_assign(
+ __entry->generation = root->fs_info->generation;
+ __entry->root_objectid = root->root_key.objectid;
+ ),
+
+ TP_printk("root = %llu(%s), gen = %llu",
+ show_root_type(__entry->root_objectid),
+ (unsigned long long)__entry->generation)
+);
+
+DECLARE_EVENT_CLASS(btrfs__inode,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( blkcnt_t, blocks )
+ __field( u64, disk_i_size )
+ __field( u64, generation )
+ __field( u64, last_trans )
+ __field( u64, logged_trans )
+ __field( u64, root_objectid )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->blocks = inode->i_blocks;
+ __entry->disk_i_size = BTRFS_I(inode)->disk_i_size;
+ __entry->generation = BTRFS_I(inode)->generation;
+ __entry->last_trans = BTRFS_I(inode)->last_trans;
+ __entry->logged_trans = BTRFS_I(inode)->logged_trans;
+ __entry->root_objectid =
+ BTRFS_I(inode)->root->root_key.objectid;
+ ),
+
+ TP_printk("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
+ "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu",
+ show_root_type(__entry->root_objectid),
+ (unsigned long long)__entry->generation,
+ (unsigned long)__entry->ino,
+ (unsigned long long)__entry->blocks,
+ (unsigned long long)__entry->disk_i_size,
+ (unsigned long long)__entry->last_trans,
+ (unsigned long long)__entry->logged_trans)
+);
+
+DEFINE_EVENT(btrfs__inode, btrfs_inode_new,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+DEFINE_EVENT(btrfs__inode, btrfs_inode_request,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+#define __show_map_type(type) \
+ __print_symbolic_u64(type, \
+ { EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \
+ { EXTENT_MAP_HOLE, "HOLE" }, \
+ { EXTENT_MAP_INLINE, "INLINE" }, \
+ { EXTENT_MAP_DELALLOC, "DELALLOC" })
+
+#define show_map_type(type) \
+ type, (type >= EXTENT_MAP_LAST_BYTE) ? "-" : __show_map_type(type)
+
+#define show_map_flags(flag) \
+ __print_flags(flag, "|", \
+ { EXTENT_FLAG_PINNED, "PINNED" }, \
+ { EXTENT_FLAG_COMPRESSED, "COMPRESSED" }, \
+ { EXTENT_FLAG_VACANCY, "VACANCY" }, \
+ { EXTENT_FLAG_PREALLOC, "PREALLOC" })
+
+TRACE_EVENT(btrfs_get_extent,
+
+ TP_PROTO(struct btrfs_root *root, struct extent_map *map),
+
+ TP_ARGS(root, map),
+
+ TP_STRUCT__entry(
+ __field( u64, root_objectid )
+ __field( u64, start )
+ __field( u64, len )
+ __field( u64, orig_start )
+ __field( u64, block_start )
+ __field( u64, block_len )
+ __field( unsigned long, flags )
+ __field( int, refs )
+ __field( unsigned int, compress_type )
+ ),
+
+ TP_fast_assign(
+ __entry->root_objectid = root->root_key.objectid;
+ __entry->start = map->start;
+ __entry->len = map->len;
+ __entry->orig_start = map->orig_start;
+ __entry->block_start = map->block_start;
+ __entry->block_len = map->block_len;
+ __entry->flags = map->flags;
+ __entry->refs = atomic_read(&map->refs);
+ __entry->compress_type = map->compress_type;
+ ),
+
+ TP_printk("root = %llu(%s), start = %llu, len = %llu, "
+ "orig_start = %llu, block_start = %llu(%s), "
+ "block_len = %llu, flags = %s, refs = %u, "
+ "compress_type = %u",
+ show_root_type(__entry->root_objectid),
+ (unsigned long long)__entry->start,
+ (unsigned long long)__entry->len,
+ (unsigned long long)__entry->orig_start,
+ show_map_type(__entry->block_start),
+ (unsigned long long)__entry->block_len,
+ show_map_flags(__entry->flags),
+ __entry->refs, __entry->compress_type)
+);
+
+#define show_ordered_flags(flags) \
+ __print_symbolic(flags, \
+ { BTRFS_ORDERED_IO_DONE, "IO_DONE" }, \
+ { BTRFS_ORDERED_COMPLETE, "COMPLETE" }, \
+ { BTRFS_ORDERED_NOCOW, "NOCOW" }, \
+ { BTRFS_ORDERED_COMPRESSED, "COMPRESSED" }, \
+ { BTRFS_ORDERED_PREALLOC, "PREALLOC" }, \
+ { BTRFS_ORDERED_DIRECT, "DIRECT" })
+
+DECLARE_EVENT_CLASS(btrfs__ordered_extent,
+
+ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( u64, file_offset )
+ __field( u64, start )
+ __field( u64, len )
+ __field( u64, disk_len )
+ __field( u64, bytes_left )
+ __field( unsigned long, flags )
+ __field( int, compress_type )
+ __field( int, refs )
+ __field( u64, root_objectid )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->file_offset = ordered->file_offset;
+ __entry->start = ordered->start;
+ __entry->len = ordered->len;
+ __entry->disk_len = ordered->disk_len;
+ __entry->bytes_left = ordered->bytes_left;
+ __entry->flags = ordered->flags;
+ __entry->compress_type = ordered->compress_type;
+ __entry->refs = atomic_read(&ordered->refs);
+ __entry->root_objectid =
+ BTRFS_I(inode)->root->root_key.objectid;
+ ),
+
+ TP_printk("root = %llu(%s), ino = %llu, file_offset = %llu, "
+ "start = %llu, len = %llu, disk_len = %llu, "
+ "bytes_left = %llu, flags = %s, compress_type = %d, "
+ "refs = %d",
+ show_root_type(__entry->root_objectid),
+ (unsigned long long)__entry->ino,
+ (unsigned long long)__entry->file_offset,
+ (unsigned long long)__entry->start,
+ (unsigned long long)__entry->len,
+ (unsigned long long)__entry->disk_len,
+ (unsigned long long)__entry->bytes_left,
+ show_ordered_flags(__entry->flags),
+ __entry->compress_type, __entry->refs)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_add,
+
+ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_remove,
+
+ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_start,
+
+ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put,
+
+ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DECLARE_EVENT_CLASS(btrfs__writepage,
+
+ TP_PROTO(struct page *page, struct inode *inode,
+ struct writeback_control *wbc),
+
+ TP_ARGS(page, inode, wbc),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( pgoff_t, index )
+ __field( long, nr_to_write )
+ __field( long, pages_skipped )
+ __field( loff_t, range_start )
+ __field( loff_t, range_end )
+ __field( char, nonblocking )
+ __field( char, for_kupdate )
+ __field( char, for_reclaim )
+ __field( char, range_cyclic )
+ __field( pgoff_t, writeback_index )
+ __field( u64, root_objectid )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->index = page->index;
+ __entry->nr_to_write = wbc->nr_to_write;
+ __entry->pages_skipped = wbc->pages_skipped;
+ __entry->range_start = wbc->range_start;
+ __entry->range_end = wbc->range_end;
+ __entry->nonblocking = wbc->nonblocking;
+ __entry->for_kupdate = wbc->for_kupdate;
+ __entry->for_reclaim = wbc->for_reclaim;
+ __entry->range_cyclic = wbc->range_cyclic;
+ __entry->writeback_index = inode->i_mapping->writeback_index;
+ __entry->root_objectid =
+ BTRFS_I(inode)->root->root_key.objectid;
+ ),
+
+ TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, "
+ "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
+ "range_end = %llu, nonblocking = %d, for_kupdate = %d, "
+ "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
+ show_root_type(__entry->root_objectid),
+ (unsigned long)__entry->ino, __entry->index,
+ __entry->nr_to_write, __entry->pages_skipped,
+ __entry->range_start, __entry->range_end,
+ __entry->nonblocking, __entry->for_kupdate,
+ __entry->for_reclaim, __entry->range_cyclic,
+ (unsigned long)__entry->writeback_index)
+);
+
+DEFINE_EVENT(btrfs__writepage, __extent_writepage,
+
+ TP_PROTO(struct page *page, struct inode *inode,
+ struct writeback_control *wbc),
+
+ TP_ARGS(page, inode, wbc)
+);
+
+TRACE_EVENT(btrfs_writepage_end_io_hook,
+
+ TP_PROTO(struct page *page, u64 start, u64 end, int uptodate),
+
+ TP_ARGS(page, start, end, uptodate),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( pgoff_t, index )
+ __field( u64, start )
+ __field( u64, end )
+ __field( int, uptodate )
+ __field( u64, root_objectid )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = page->mapping->host->i_ino;
+ __entry->index = page->index;
+ __entry->start = start;
+ __entry->end = end;
+ __entry->uptodate = uptodate;
+ __entry->root_objectid =
+ BTRFS_I(page->mapping->host)->root->root_key.objectid;
+ ),
+
+ TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
+ "end = %llu, uptodate = %d",
+ show_root_type(__entry->root_objectid),
+ (unsigned long)__entry->ino, (unsigned long)__entry->index,
+ (unsigned long long)__entry->start,
+ (unsigned long long)__entry->end, __entry->uptodate)
+);
+
+TRACE_EVENT(btrfs_sync_file,
+
+ TP_PROTO(struct file *file, int datasync),
+
+ TP_ARGS(file, datasync),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( ino_t, parent )
+ __field( int, datasync )
+ __field( u64, root_objectid )
+ ),
+
+ TP_fast_assign(
+ struct dentry *dentry = file->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+
+ __entry->ino = inode->i_ino;
+ __entry->parent = dentry->d_parent->d_inode->i_ino;
+ __entry->datasync = datasync;
+ __entry->root_objectid =
+ BTRFS_I(inode)->root->root_key.objectid;
+ ),
+
+ TP_printk("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d",
+ show_root_type(__entry->root_objectid),
+ (unsigned long)__entry->ino, (unsigned long)__entry->parent,
+ __entry->datasync)
+);
+
+TRACE_EVENT(btrfs_sync_fs,
+
+ TP_PROTO(int wait),
+
+ TP_ARGS(wait),
+
+ TP_STRUCT__entry(
+ __field( int, wait )
+ ),
+
+ TP_fast_assign(
+ __entry->wait = wait;
+ ),
+
+ TP_printk("wait = %d", __entry->wait)
+);
+
+#define show_ref_action(action) \
+ __print_symbolic(action, \
+ { BTRFS_ADD_DELAYED_REF, "ADD_DELAYED_REF" }, \
+ { BTRFS_DROP_DELAYED_REF, "DROP_DELAYED_REF" }, \
+ { BTRFS_ADD_DELAYED_EXTENT, "ADD_DELAYED_EXTENT" }, \
+ { BTRFS_UPDATE_DELAYED_HEAD, "UPDATE_DELAYED_HEAD" })
+
+
+TRACE_EVENT(btrfs_delayed_tree_ref,
+
+ TP_PROTO(struct btrfs_delayed_ref_node *ref,
+ struct btrfs_delayed_tree_ref *full_ref,
+ int action),
+
+ TP_ARGS(ref, full_ref, action),
+
+ TP_STRUCT__entry(
+ __field( u64, bytenr )
+ __field( u64, num_bytes )
+ __field( int, action )
+ __field( u64, parent )
+ __field( u64, ref_root )
+ __field( int, level )
+ __field( int, type )
+ ),
+
+ TP_fast_assign(
+ __entry->bytenr = ref->bytenr;
+ __entry->num_bytes = ref->num_bytes;
+ __entry->action = action;
+ __entry->parent = full_ref->parent;
+ __entry->ref_root = full_ref->root;
+ __entry->level = full_ref->level;
+ __entry->type = ref->type;
+ ),
+
+ TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
+ "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
+ "type = %s",
+ (unsigned long long)__entry->bytenr,
+ (unsigned long long)__entry->num_bytes,
+ show_ref_action(__entry->action),
+ show_root_type(__entry->parent),
+ show_root_type(__entry->ref_root),
+ __entry->level, show_ref_type(__entry->type))
+);
+
+TRACE_EVENT(btrfs_delayed_data_ref,
+
+ TP_PROTO(struct btrfs_delayed_ref_node *ref,
+ struct btrfs_delayed_data_ref *full_ref,
+ int action),
+
+ TP_ARGS(ref, full_ref, action),
+
+ TP_STRUCT__entry(
+ __field( u64, bytenr )
+ __field( u64, num_bytes )
+ __field( int, action )
+ __field( u64, parent )
+ __field( u64, ref_root )
+ __field( u64, owner )
+ __field( u64, offset )
+ __field( int, type )
+ ),
+
+ TP_fast_assign(
+ __entry->bytenr = ref->bytenr;
+ __entry->num_bytes = ref->num_bytes;
+ __entry->action = action;
+ __entry->parent = full_ref->parent;
+ __entry->ref_root = full_ref->root;
+ __entry->owner = full_ref->objectid;
+ __entry->offset = full_ref->offset;
+ __entry->type = ref->type;
+ ),
+
+ TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
+ "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
+ "offset = %llu, type = %s",
+ (unsigned long long)__entry->bytenr,
+ (unsigned long long)__entry->num_bytes,
+ show_ref_action(__entry->action),
+ show_root_type(__entry->parent),
+ show_root_type(__entry->ref_root),
+ (unsigned long long)__entry->owner,
+ (unsigned long long)__entry->offset,
+ show_ref_type(__entry->type))
+);
+
+TRACE_EVENT(btrfs_delayed_ref_head,
+
+ TP_PROTO(struct btrfs_delayed_ref_node *ref,
+ struct btrfs_delayed_ref_head *head_ref,
+ int action),
+
+ TP_ARGS(ref, head_ref, action),
+
+ TP_STRUCT__entry(
+ __field( u64, bytenr )
+ __field( u64, num_bytes )
+ __field( int, action )
+ __field( int, is_data )
+ ),
+
+ TP_fast_assign(
+ __entry->bytenr = ref->bytenr;
+ __entry->num_bytes = ref->num_bytes;
+ __entry->action = action;
+ __entry->is_data = head_ref->is_data;
+ ),
+
+ TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d",
+ (unsigned long long)__entry->bytenr,
+ (unsigned long long)__entry->num_bytes,
+ show_ref_action(__entry->action),
+ __entry->is_data)
+);
+
+#define show_chunk_type(type) \
+ __print_flags(type, "|", \
+ { BTRFS_BLOCK_GROUP_DATA, "DATA" }, \
+ { BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
+ { BTRFS_BLOCK_GROUP_METADATA, "METADATA"}, \
+ { BTRFS_BLOCK_GROUP_RAID0, "RAID0" }, \
+ { BTRFS_BLOCK_GROUP_RAID1, "RAID1" }, \
+ { BTRFS_BLOCK_GROUP_DUP, "DUP" }, \
+ { BTRFS_BLOCK_GROUP_RAID10, "RAID10"})
+
+DECLARE_EVENT_CLASS(btrfs__chunk,
+
+ TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+ u64 offset, u64 size),
+
+ TP_ARGS(root, map, offset, size),
+
+ TP_STRUCT__entry(
+ __field( int, num_stripes )
+ __field( u64, type )
+ __field( int, sub_stripes )
+ __field( u64, offset )
+ __field( u64, size )
+ __field( u64, root_objectid )
+ ),
+
+ TP_fast_assign(
+ __entry->num_stripes = map->num_stripes;
+ __entry->type = map->type;
+ __entry->sub_stripes = map->sub_stripes;
+ __entry->offset = offset;
+ __entry->size = size;
+ __entry->root_objectid = root->root_key.objectid;
+ ),
+
+ TP_printk("root = %llu(%s), offset = %llu, size = %llu, "
+ "num_stripes = %d, sub_stripes = %d, type = %s",
+ show_root_type(__entry->root_objectid),
+ (unsigned long long)__entry->offset,
+ (unsigned long long)__entry->size,
+ __entry->num_stripes, __entry->sub_stripes,
+ show_chunk_type(__entry->type))
+);
+
+DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
+
+ TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+ u64 offset, u64 size),
+
+ TP_ARGS(root, map, offset, size)
+);
+
+DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free,
+
+ TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+ u64 offset, u64 size),
+
+ TP_ARGS(root, map, offset, size)
+);
+
+TRACE_EVENT(btrfs_cow_block,
+
+ TP_PROTO(struct btrfs_root *root, struct extent_buffer *buf,
+ struct extent_buffer *cow),
+
+ TP_ARGS(root, buf, cow),
+
+ TP_STRUCT__entry(
+ __field( u64, root_objectid )
+ __field( u64, buf_start )
+ __field( int, refs )
+ __field( u64, cow_start )
+ __field( int, buf_level )
+ __field( int, cow_level )
+ ),
+
+ TP_fast_assign(
+ __entry->root_objectid = root->root_key.objectid;
+ __entry->buf_start = buf->start;
+ __entry->refs = atomic_read(&buf->refs);
+ __entry->cow_start = cow->start;
+ __entry->buf_level = btrfs_header_level(buf);
+ __entry->cow_level = btrfs_header_level(cow);
+ ),
+
+ TP_printk("root = %llu(%s), refs = %d, orig_buf = %llu "
+ "(orig_level = %d), cow_buf = %llu (cow_level = %d)",
+ show_root_type(__entry->root_objectid),
+ __entry->refs,
+ (unsigned long long)__entry->buf_start,
+ __entry->buf_level,
+ (unsigned long long)__entry->cow_start,
+ __entry->cow_level)
+);
+
+DECLARE_EVENT_CLASS(btrfs__reserved_extent,
+
+ TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+
+ TP_ARGS(root, start, len),
+
+ TP_STRUCT__entry(
+ __field( u64, root_objectid )
+ __field( u64, start )
+ __field( u64, len )
+ ),
+
+ TP_fast_assign(
+ __entry->root_objectid = root->root_key.objectid;
+ __entry->start = start;
+ __entry->len = len;
+ ),
+
+ TP_printk("root = %llu(%s), start = %llu, len = %llu",
+ show_root_type(__entry->root_objectid),
+ (unsigned long long)__entry->start,
+ (unsigned long long)__entry->len)
+);
+
+DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_alloc,
+
+ TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+
+ TP_ARGS(root, start, len)
+);
+
+DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free,
+
+ TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+
+ TP_ARGS(root, start, len)
+);
+
+#endif /* _TRACE_BTRFS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
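
The show_* helpers above affect only rendering, not what gets recorded:
__print_flags() joins the name of every set bit with the given separator,
__print_symbolic() prints the single matching name, and
show_root_type()/show_map_type() expand to two printk arguments so they pair
with the "%llu(%s)" conversions in the TP_printk() strings. Roughly (values
illustrative):

	show_map_flags(EXTENT_FLAG_PINNED | EXTENT_FLAG_COMPRESSED)
		/* renders as "PINNED|COMPRESSED" */
	show_ref_type(BTRFS_TREE_BLOCK_REF_KEY)
		/* renders as "TREE_BLOCK_REF" */
	show_root_type(BTRFS_FS_TREE_OBJECTID)
		/* expands to the raw u64 plus the string "FS_TREE" */
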
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
new file mode 100644
index 00000000..388bcdd2
--- /dev/null
+++ b/include/trace/events/compaction.h
@@ -0,0 +1,74 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM compaction
+
+#if !defined(_TRACE_COMPACTION_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COMPACTION_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include "gfpflags.h"
+
+DECLARE_EVENT_CLASS(mm_compaction_isolate_template,
+
+ TP_PROTO(unsigned long nr_scanned,
+ unsigned long nr_taken),
+
+ TP_ARGS(nr_scanned, nr_taken),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, nr_scanned)
+ __field(unsigned long, nr_taken)
+ ),
+
+ TP_fast_assign(
+ __entry->nr_scanned = nr_scanned;
+ __entry->nr_taken = nr_taken;
+ ),
+
+ TP_printk("nr_scanned=%lu nr_taken=%lu",
+ __entry->nr_scanned,
+ __entry->nr_taken)
+);
+
+DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages,
+
+ TP_PROTO(unsigned long nr_scanned,
+ unsigned long nr_taken),
+
+ TP_ARGS(nr_scanned, nr_taken)
+);
+
+DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
+ TP_PROTO(unsigned long nr_scanned,
+ unsigned long nr_taken),
+
+ TP_ARGS(nr_scanned, nr_taken)
+);
+
+TRACE_EVENT(mm_compaction_migratepages,
+
+ TP_PROTO(unsigned long nr_migrated,
+ unsigned long nr_failed),
+
+ TP_ARGS(nr_migrated, nr_failed),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, nr_migrated)
+ __field(unsigned long, nr_failed)
+ ),
+
+ TP_fast_assign(
+ __entry->nr_migrated = nr_migrated;
+ __entry->nr_failed = nr_failed;
+ ),
+
+ TP_printk("nr_migrated=%lu nr_failed=%lu",
+ __entry->nr_migrated,
+ __entry->nr_failed)
+);
+
+
+#endif /* _TRACE_COMPACTION_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
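
A hypothetical emitter for the migration statistics (sketch only; in this
kernel the real caller lives in mm/compaction.c around migrate_pages(), whose
positive return value is the number of pages that could not be migrated):

	/* sketch: report one migration batch */
	static void report_migration(unsigned long nr_isolated, int err)
	{
		unsigned long nr_failed = (err > 0) ? err : 0;

		trace_mm_compaction_migratepages(nr_isolated - nr_failed, nr_failed);
	}
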
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
new file mode 100644
index 00000000..3a90d3d6
--- /dev/null
+++ b/include/trace/events/cpufreq_interactive.h
@@ -0,0 +1,87 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq_interactive
+
+#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUFREQ_INTERACTIVE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(set,
+ TP_PROTO(u32 cpu_id, unsigned long targfreq,
+ unsigned long actualfreq),
+ TP_ARGS(cpu_id, targfreq, actualfreq),
+
+ TP_STRUCT__entry(
+ __field( u32, cpu_id )
+ __field(unsigned long, targfreq )
+ __field(unsigned long, actualfreq )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = (u32) cpu_id;
+ __entry->targfreq = targfreq;
+ __entry->actualfreq = actualfreq;
+ ),
+
+ TP_printk("cpu=%u targ=%lu actual=%lu",
+ __entry->cpu_id, __entry->targfreq,
+ __entry->actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_up,
+ TP_PROTO(u32 cpu_id, unsigned long targfreq,
+ unsigned long actualfreq),
+ TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_down,
+ TP_PROTO(u32 cpu_id, unsigned long targfreq,
+ unsigned long actualfreq),
+ TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DECLARE_EVENT_CLASS(loadeval,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curfreq, unsigned long targfreq),
+ TP_ARGS(cpu_id, load, curfreq, targfreq),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, cpu_id )
+ __field(unsigned long, load )
+ __field(unsigned long, curfreq )
+ __field(unsigned long, targfreq )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = cpu_id;
+ __entry->load = load;
+ __entry->curfreq = curfreq;
+ __entry->targfreq = targfreq;
+ ),
+
+ TP_printk("cpu=%lu load=%lu cur=%lu targ=%lu",
+ __entry->cpu_id, __entry->load, __entry->curfreq,
+ __entry->targfreq)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_target,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curfreq, unsigned long targfreq),
+ TP_ARGS(cpu_id, load, curfreq, targfreq)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_already,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curfreq, unsigned long targfreq),
+ TP_ARGS(cpu_id, load, curfreq, targfreq)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
+ TP_PROTO(unsigned long cpu_id, unsigned long load,
+ unsigned long curfreq, unsigned long targfreq),
+ TP_ARGS(cpu_id, load, curfreq, targfreq)
+);
+#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
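
When enabled, each hit on these governor events is rendered through its class's
TP_printk() format, so the trace buffer lines look like the following (values
illustrative, not captured output):

	cpufreq_interactive_up: cpu=0 targ=1000000 actual=800000
	cpufreq_interactive_target: cpu=0 load=85 cur=800000 targ=1000000
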
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
new file mode 100644
index 00000000..5ce2b2f5
--- /dev/null
+++ b/include/trace/events/ext4.h
@@ -0,0 +1,1526 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ext4
+
+#if !defined(_TRACE_EXT4_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EXT4_H
+
+#include <linux/writeback.h>
+#include <linux/tracepoint.h>
+
+struct ext4_allocation_context;
+struct ext4_allocation_request;
+struct ext4_prealloc_space;
+struct ext4_inode_info;
+struct mpage_da_data;
+
+#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
+
+TRACE_EVENT(ext4_free_inode,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( umode_t, mode )
+ __field( uid_t, uid )
+ __field( gid_t, gid )
+ __field( __u64, blocks )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
+ __entry->uid = inode->i_uid;
+ __entry->gid = inode->i_gid;
+ __entry->blocks = inode->i_blocks;
+ ),
+
+ TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->mode,
+ __entry->uid, __entry->gid, __entry->blocks)
+);
+
+TRACE_EVENT(ext4_request_inode,
+ TP_PROTO(struct inode *dir, int mode),
+
+ TP_ARGS(dir, mode),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, dir )
+ __field( umode_t, mode )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dir->i_sb->s_dev;
+ __entry->dir = dir->i_ino;
+ __entry->mode = mode;
+ ),
+
+ TP_printk("dev %d,%d dir %lu mode 0%o",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->dir, __entry->mode)
+);
+
+TRACE_EVENT(ext4_allocate_inode,
+ TP_PROTO(struct inode *inode, struct inode *dir, int mode),
+
+ TP_ARGS(inode, dir, mode),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( ino_t, dir )
+ __field( umode_t, mode )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->dir = dir->i_ino;
+ __entry->mode = mode;
+ ),
+
+ TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned long) __entry->dir, __entry->mode)
+);
+
+TRACE_EVENT(ext4_evict_inode,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( int, nlink )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->nlink = inode->i_nlink;
+ ),
+
+ TP_printk("dev %d,%d ino %lu nlink %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->nlink)
+);
+
+TRACE_EVENT(ext4_drop_inode,
+ TP_PROTO(struct inode *inode, int drop),
+
+ TP_ARGS(inode, drop),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( int, drop )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->drop = drop;
+ ),
+
+ TP_printk("dev %d,%d ino %lu drop %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->drop)
+);
+
+TRACE_EVENT(ext4_mark_inode_dirty,
+ TP_PROTO(struct inode *inode, unsigned long IP),
+
+ TP_ARGS(inode, IP),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field(unsigned long, ip )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->ip = IP;
+ ),
+
+ TP_printk("dev %d,%d ino %lu caller %pF",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, (void *)__entry->ip)
+);
+
+TRACE_EVENT(ext4_begin_ordered_truncate,
+ TP_PROTO(struct inode *inode, loff_t new_size),
+
+ TP_ARGS(inode, new_size),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( loff_t, new_size )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->new_size = new_size;
+ ),
+
+ TP_printk("dev %d,%d ino %lu new_size %lld",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->new_size)
+);
+
+DECLARE_EVENT_CLASS(ext4__write_begin,
+
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+ unsigned int flags),
+
+ TP_ARGS(inode, pos, len, flags),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( loff_t, pos )
+ __field( unsigned int, len )
+ __field( unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pos = pos;
+ __entry->len = len;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->pos, __entry->len, __entry->flags)
+);
+
+DEFINE_EVENT(ext4__write_begin, ext4_write_begin,
+
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+ unsigned int flags),
+
+ TP_ARGS(inode, pos, len, flags)
+);
+
+DEFINE_EVENT(ext4__write_begin, ext4_da_write_begin,
+
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+ unsigned int flags),
+
+ TP_ARGS(inode, pos, len, flags)
+);
+
+DECLARE_EVENT_CLASS(ext4__write_end,
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+ unsigned int copied),
+
+ TP_ARGS(inode, pos, len, copied),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( loff_t, pos )
+ __field( unsigned int, len )
+ __field( unsigned int, copied )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pos = pos;
+ __entry->len = len;
+ __entry->copied = copied;
+ ),
+
+ TP_printk("dev %d,%d ino %lu pos %lld len %u copied %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->pos, __entry->len, __entry->copied)
+);
+
+DEFINE_EVENT(ext4__write_end, ext4_ordered_write_end,
+
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+ unsigned int copied),
+
+ TP_ARGS(inode, pos, len, copied)
+);
+
+DEFINE_EVENT(ext4__write_end, ext4_writeback_write_end,
+
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+ unsigned int copied),
+
+ TP_ARGS(inode, pos, len, copied)
+);
+
+DEFINE_EVENT(ext4__write_end, ext4_journalled_write_end,
+
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+ unsigned int copied),
+
+ TP_ARGS(inode, pos, len, copied)
+);
+
+DEFINE_EVENT(ext4__write_end, ext4_da_write_end,
+
+ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+ unsigned int copied),
+
+ TP_ARGS(inode, pos, len, copied)
+);
+
+TRACE_EVENT(ext4_da_writepages,
+ TP_PROTO(struct inode *inode, struct writeback_control *wbc),
+
+ TP_ARGS(inode, wbc),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( long, nr_to_write )
+ __field( long, pages_skipped )
+ __field( loff_t, range_start )
+ __field( loff_t, range_end )
+ __field( int, sync_mode )
+ __field( char, for_kupdate )
+ __field( char, range_cyclic )
+ __field( pgoff_t, writeback_index )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->nr_to_write = wbc->nr_to_write;
+ __entry->pages_skipped = wbc->pages_skipped;
+ __entry->range_start = wbc->range_start;
+ __entry->range_end = wbc->range_end;
+ __entry->sync_mode = wbc->sync_mode;
+ __entry->for_kupdate = wbc->for_kupdate;
+ __entry->range_cyclic = wbc->range_cyclic;
+ __entry->writeback_index = inode->i_mapping->writeback_index;
+ ),
+
+ TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
+		  "range_start %lld range_end %lld sync_mode %d "
+ "for_kupdate %d range_cyclic %d writeback_index %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->nr_to_write,
+ __entry->pages_skipped, __entry->range_start,
+ __entry->range_end, __entry->sync_mode,
+ __entry->for_kupdate, __entry->range_cyclic,
+ (unsigned long) __entry->writeback_index)
+);
+
+TRACE_EVENT(ext4_da_write_pages,
+ TP_PROTO(struct inode *inode, struct mpage_da_data *mpd),
+
+ TP_ARGS(inode, mpd),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( __u64, b_blocknr )
+ __field( __u32, b_size )
+ __field( __u32, b_state )
+ __field( unsigned long, first_page )
+ __field( int, io_done )
+ __field( int, pages_written )
+ __field( int, sync_mode )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->b_blocknr = mpd->b_blocknr;
+ __entry->b_size = mpd->b_size;
+ __entry->b_state = mpd->b_state;
+ __entry->first_page = mpd->first_page;
+ __entry->io_done = mpd->io_done;
+ __entry->pages_written = mpd->pages_written;
+ __entry->sync_mode = mpd->wbc->sync_mode;
+ ),
+
+ TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x "
+ "first_page %lu io_done %d pages_written %d sync_mode %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->b_blocknr, __entry->b_size,
+ __entry->b_state, __entry->first_page,
+ __entry->io_done, __entry->pages_written,
+ __entry->sync_mode
+ )
+);
+
+TRACE_EVENT(ext4_da_writepages_result,
+ TP_PROTO(struct inode *inode, struct writeback_control *wbc,
+ int ret, int pages_written),
+
+ TP_ARGS(inode, wbc, ret, pages_written),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( int, ret )
+ __field( int, pages_written )
+ __field( long, pages_skipped )
+ __field( int, sync_mode )
+ __field( char, more_io )
+ __field( pgoff_t, writeback_index )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->ret = ret;
+ __entry->pages_written = pages_written;
+ __entry->pages_skipped = wbc->pages_skipped;
+ __entry->sync_mode = wbc->sync_mode;
+ __entry->more_io = wbc->more_io;
+ __entry->writeback_index = inode->i_mapping->writeback_index;
+ ),
+
+ TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
+		  "more_io %d sync_mode %d writeback_index %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->ret,
+ __entry->pages_written, __entry->pages_skipped,
+ __entry->more_io, __entry->sync_mode,
+ (unsigned long) __entry->writeback_index)
+);
+
+DECLARE_EVENT_CLASS(ext4__page_op,
+ TP_PROTO(struct page *page),
+
+ TP_ARGS(page),
+
+ TP_STRUCT__entry(
+ __field( pgoff_t, index )
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+	),
+
+ TP_fast_assign(
+ __entry->index = page->index;
+ __entry->ino = page->mapping->host->i_ino;
+ __entry->dev = page->mapping->host->i_sb->s_dev;
+ ),
+
+ TP_printk("dev %d,%d ino %lu page_index %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned long) __entry->index)
+);
+
+DEFINE_EVENT(ext4__page_op, ext4_writepage,
+
+ TP_PROTO(struct page *page),
+
+ TP_ARGS(page)
+);
+
+DEFINE_EVENT(ext4__page_op, ext4_readpage,
+
+ TP_PROTO(struct page *page),
+
+ TP_ARGS(page)
+);
+
+DEFINE_EVENT(ext4__page_op, ext4_releasepage,
+
+ TP_PROTO(struct page *page),
+
+ TP_ARGS(page)
+);
+
+TRACE_EVENT(ext4_invalidatepage,
+ TP_PROTO(struct page *page, unsigned long offset),
+
+ TP_ARGS(page, offset),
+
+ TP_STRUCT__entry(
+ __field( pgoff_t, index )
+ __field( unsigned long, offset )
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+	),
+
+ TP_fast_assign(
+ __entry->index = page->index;
+ __entry->offset = offset;
+ __entry->ino = page->mapping->host->i_ino;
+ __entry->dev = page->mapping->host->i_sb->s_dev;
+ ),
+
+ TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned long) __entry->index, __entry->offset)
+);
+
+TRACE_EVENT(ext4_discard_blocks,
+ TP_PROTO(struct super_block *sb, unsigned long long blk,
+ unsigned long long count),
+
+ TP_ARGS(sb, blk, count),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( __u64, blk )
+ __field( __u64, count )
+	),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->blk = blk;
+ __entry->count = count;
+ ),
+
+ TP_printk("dev %d,%d blk %llu count %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->blk, __entry->count)
+);
+
+DECLARE_EVENT_CLASS(ext4__mb_new_pa,
+ TP_PROTO(struct ext4_allocation_context *ac,
+ struct ext4_prealloc_space *pa),
+
+ TP_ARGS(ac, pa),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( __u64, pa_pstart )
+ __field( __u32, pa_len )
+ __field( __u64, pa_lstart )
+	),
+
+ TP_fast_assign(
+ __entry->dev = ac->ac_sb->s_dev;
+ __entry->ino = ac->ac_inode->i_ino;
+ __entry->pa_pstart = pa->pa_pstart;
+ __entry->pa_len = pa->pa_len;
+ __entry->pa_lstart = pa->pa_lstart;
+ ),
+
+ TP_printk("dev %d,%d ino %lu pstart %llu len %u lstart %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
+);
+
+DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa,
+
+ TP_PROTO(struct ext4_allocation_context *ac,
+ struct ext4_prealloc_space *pa),
+
+ TP_ARGS(ac, pa)
+);
+
+DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,
+
+ TP_PROTO(struct ext4_allocation_context *ac,
+ struct ext4_prealloc_space *pa),
+
+ TP_ARGS(ac, pa)
+);
+
+TRACE_EVENT(ext4_mb_release_inode_pa,
+ TP_PROTO(struct ext4_prealloc_space *pa,
+ unsigned long long block, unsigned int count),
+
+ TP_ARGS(pa, block, count),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( __u64, block )
+ __field( __u32, count )
+	),
+
+ TP_fast_assign(
+ __entry->dev = pa->pa_inode->i_sb->s_dev;
+ __entry->ino = pa->pa_inode->i_ino;
+ __entry->block = block;
+ __entry->count = count;
+ ),
+
+ TP_printk("dev %d,%d ino %lu block %llu count %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->block, __entry->count)
+);
+
+TRACE_EVENT(ext4_mb_release_group_pa,
+ TP_PROTO(struct ext4_prealloc_space *pa),
+
+ TP_ARGS(pa),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( __u64, pa_pstart )
+ __field( __u32, pa_len )
+	),
+
+ TP_fast_assign(
+ __entry->dev = pa->pa_inode->i_sb->s_dev;
+ __entry->pa_pstart = pa->pa_pstart;
+ __entry->pa_len = pa->pa_len;
+ ),
+
+ TP_printk("dev %d,%d pstart %llu len %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->pa_pstart, __entry->pa_len)
+);
+
+TRACE_EVENT(ext4_discard_preallocations,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+	),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ ),
+
+ TP_printk("dev %d,%d ino %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino)
+);
+
+TRACE_EVENT(ext4_mb_discard_preallocations,
+ TP_PROTO(struct super_block *sb, int needed),
+
+ TP_ARGS(sb, needed),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( int, needed )
+	),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->needed = needed;
+ ),
+
+ TP_printk("dev %d,%d needed %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->needed)
+);
+
+TRACE_EVENT(ext4_request_blocks,
+ TP_PROTO(struct ext4_allocation_request *ar),
+
+ TP_ARGS(ar),
+
+ TP_STRUCT__entry(
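+		/* clarifying note: lleft/lright are the logical blocks of
+		 * the neighbouring extents; pleft/pright their physical
+		 * (on-disk) counterparts */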
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( unsigned int, flags )
+ __field( unsigned int, len )
+ __field( __u32, logical )
+ __field( __u32, lleft )
+ __field( __u32, lright )
+ __field( __u64, goal )
+ __field( __u64, pleft )
+ __field( __u64, pright )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = ar->inode->i_sb->s_dev;
+ __entry->ino = ar->inode->i_ino;
+ __entry->flags = ar->flags;
+ __entry->len = ar->len;
+ __entry->logical = ar->logical;
+ __entry->goal = ar->goal;
+ __entry->lleft = ar->lleft;
+ __entry->lright = ar->lright;
+ __entry->pleft = ar->pleft;
+ __entry->pright = ar->pright;
+ ),
+
+ TP_printk("dev %d,%d ino %lu flags %u len %u lblk %u goal %llu "
+		  "lleft %u lright %u pleft %llu pright %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->flags,
+ __entry->len, __entry->logical, __entry->goal,
+ __entry->lleft, __entry->lright, __entry->pleft,
+ __entry->pright)
+);
+
+TRACE_EVENT(ext4_allocate_blocks,
+ TP_PROTO(struct ext4_allocation_request *ar, unsigned long long block),
+
+ TP_ARGS(ar, block),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( __u64, block )
+ __field( unsigned int, flags )
+ __field( unsigned int, len )
+ __field( __u32, logical )
+ __field( __u32, lleft )
+ __field( __u32, lright )
+ __field( __u64, goal )
+ __field( __u64, pleft )
+ __field( __u64, pright )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = ar->inode->i_sb->s_dev;
+ __entry->ino = ar->inode->i_ino;
+ __entry->block = block;
+ __entry->flags = ar->flags;
+ __entry->len = ar->len;
+ __entry->logical = ar->logical;
+ __entry->goal = ar->goal;
+ __entry->lleft = ar->lleft;
+ __entry->lright = ar->lright;
+ __entry->pleft = ar->pleft;
+ __entry->pright = ar->pright;
+ ),
+
+ TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %u "
+ "goal %llu lleft %u lright %u pleft %llu pright %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->flags,
+ __entry->len, __entry->block, __entry->logical,
+ __entry->goal, __entry->lleft, __entry->lright,
+ __entry->pleft, __entry->pright)
+);
+
+TRACE_EVENT(ext4_free_blocks,
+ TP_PROTO(struct inode *inode, __u64 block, unsigned long count,
+ int flags),
+
+ TP_ARGS(inode, block, count, flags),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( umode_t, mode )
+ __field( __u64, block )
+ __field( unsigned long, count )
+ __field( int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
+ __entry->block = block;
+ __entry->count = count;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->mode, __entry->block, __entry->count,
+ __entry->flags)
+);
+
+TRACE_EVENT(ext4_sync_file_enter,
+ TP_PROTO(struct file *file, int datasync),
+
+ TP_ARGS(file, datasync),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( ino_t, parent )
+ __field( int, datasync )
+ ),
+
+ TP_fast_assign(
+ struct dentry *dentry = file->f_path.dentry;
+
+ __entry->dev = dentry->d_inode->i_sb->s_dev;
+ __entry->ino = dentry->d_inode->i_ino;
+ __entry->datasync = datasync;
+ __entry->parent = dentry->d_parent->d_inode->i_ino;
+ ),
+
+	TP_printk("dev %d,%d ino %lu parent %lu datasync %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned long) __entry->parent, __entry->datasync)
+);
+
+TRACE_EVENT(ext4_sync_file_exit,
+ TP_PROTO(struct inode *inode, int ret),
+
+ TP_ARGS(inode, ret),
+
+ TP_STRUCT__entry(
+ __field( int, ret )
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ ),
+
+ TP_fast_assign(
+ __entry->ret = ret;
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ ),
+
+ TP_printk("dev %d,%d ino %lu ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->ret)
+);
+
+TRACE_EVENT(ext4_sync_fs,
+ TP_PROTO(struct super_block *sb, int wait),
+
+ TP_ARGS(sb, wait),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( int, wait )
+	),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->wait = wait;
+ ),
+
+ TP_printk("dev %d,%d wait %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->wait)
+);
+
+TRACE_EVENT(ext4_alloc_da_blocks,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( unsigned int, data_blocks )
+ __field( unsigned int, meta_blocks )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+ __entry->meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
+ ),
+
+ TP_printk("dev %d,%d ino %lu data_blocks %u meta_blocks %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->data_blocks, __entry->meta_blocks)
+);
+
+TRACE_EVENT(ext4_mballoc_alloc,
+ TP_PROTO(struct ext4_allocation_context *ac),
+
+ TP_ARGS(ac),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( __u16, found )
+ __field( __u16, groups )
+ __field( __u16, buddy )
+ __field( __u16, flags )
+ __field( __u16, tail )
+ __field( __u8, cr )
+ __field( __u32, orig_logical )
+ __field( int, orig_start )
+ __field( __u32, orig_group )
+ __field( int, orig_len )
+ __field( __u32, goal_logical )
+ __field( int, goal_start )
+ __field( __u32, goal_group )
+ __field( int, goal_len )
+ __field( __u32, result_logical )
+ __field( int, result_start )
+ __field( __u32, result_group )
+ __field( int, result_len )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = ac->ac_inode->i_sb->s_dev;
+ __entry->ino = ac->ac_inode->i_ino;
+ __entry->found = ac->ac_found;
+ __entry->flags = ac->ac_flags;
+ __entry->groups = ac->ac_groups_scanned;
+ __entry->buddy = ac->ac_buddy;
+ __entry->tail = ac->ac_tail;
+ __entry->cr = ac->ac_criteria;
+ __entry->orig_logical = ac->ac_o_ex.fe_logical;
+ __entry->orig_start = ac->ac_o_ex.fe_start;
+ __entry->orig_group = ac->ac_o_ex.fe_group;
+ __entry->orig_len = ac->ac_o_ex.fe_len;
+ __entry->goal_logical = ac->ac_g_ex.fe_logical;
+ __entry->goal_start = ac->ac_g_ex.fe_start;
+ __entry->goal_group = ac->ac_g_ex.fe_group;
+ __entry->goal_len = ac->ac_g_ex.fe_len;
+ __entry->result_logical = ac->ac_f_ex.fe_logical;
+ __entry->result_start = ac->ac_f_ex.fe_start;
+ __entry->result_group = ac->ac_f_ex.fe_group;
+ __entry->result_len = ac->ac_f_ex.fe_len;
+ ),
+
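+	/* extents below are printed as group/start/len@logical */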
+ TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
+ "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x "
+ "tail %u broken %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->orig_group, __entry->orig_start,
+ __entry->orig_len, __entry->orig_logical,
+ __entry->goal_group, __entry->goal_start,
+ __entry->goal_len, __entry->goal_logical,
+ __entry->result_group, __entry->result_start,
+ __entry->result_len, __entry->result_logical,
+ __entry->found, __entry->groups, __entry->cr,
+ __entry->flags, __entry->tail,
+ __entry->buddy ? 1 << __entry->buddy : 0)
+);
+
+TRACE_EVENT(ext4_mballoc_prealloc,
+ TP_PROTO(struct ext4_allocation_context *ac),
+
+ TP_ARGS(ac),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( __u32, orig_logical )
+ __field( int, orig_start )
+ __field( __u32, orig_group )
+ __field( int, orig_len )
+ __field( __u32, result_logical )
+ __field( int, result_start )
+ __field( __u32, result_group )
+ __field( int, result_len )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = ac->ac_inode->i_sb->s_dev;
+ __entry->ino = ac->ac_inode->i_ino;
+ __entry->orig_logical = ac->ac_o_ex.fe_logical;
+ __entry->orig_start = ac->ac_o_ex.fe_start;
+ __entry->orig_group = ac->ac_o_ex.fe_group;
+ __entry->orig_len = ac->ac_o_ex.fe_len;
+ __entry->result_logical = ac->ac_b_ex.fe_logical;
+ __entry->result_start = ac->ac_b_ex.fe_start;
+ __entry->result_group = ac->ac_b_ex.fe_group;
+ __entry->result_len = ac->ac_b_ex.fe_len;
+ ),
+
+ TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->orig_group, __entry->orig_start,
+ __entry->orig_len, __entry->orig_logical,
+ __entry->result_group, __entry->result_start,
+ __entry->result_len, __entry->result_logical)
+);
+
+DECLARE_EVENT_CLASS(ext4__mballoc,
+ TP_PROTO(struct super_block *sb,
+ struct inode *inode,
+ ext4_group_t group,
+ ext4_grpblk_t start,
+ ext4_grpblk_t len),
+
+ TP_ARGS(sb, inode, group, start, len),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( int, result_start )
+ __field( __u32, result_group )
+ __field( int, result_len )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->ino = inode ? inode->i_ino : 0;
+ __entry->result_start = start;
+ __entry->result_group = group;
+ __entry->result_len = len;
+ ),
+
+	TP_printk("dev %d,%d inode %lu extent %u/%d/%d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->result_group, __entry->result_start,
+ __entry->result_len)
+);
+
+DEFINE_EVENT(ext4__mballoc, ext4_mballoc_discard,
+
+ TP_PROTO(struct super_block *sb,
+ struct inode *inode,
+ ext4_group_t group,
+ ext4_grpblk_t start,
+ ext4_grpblk_t len),
+
+ TP_ARGS(sb, inode, group, start, len)
+);
+
+DEFINE_EVENT(ext4__mballoc, ext4_mballoc_free,
+
+ TP_PROTO(struct super_block *sb,
+ struct inode *inode,
+ ext4_group_t group,
+ ext4_grpblk_t start,
+ ext4_grpblk_t len),
+
+ TP_ARGS(sb, inode, group, start, len)
+);
+
+TRACE_EVENT(ext4_forget,
+ TP_PROTO(struct inode *inode, int is_metadata, __u64 block),
+
+ TP_ARGS(inode, is_metadata, block),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( umode_t, mode )
+ __field( int, is_metadata )
+ __field( __u64, block )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
+ __entry->is_metadata = is_metadata;
+ __entry->block = block;
+ ),
+
+ TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->mode, __entry->is_metadata, __entry->block)
+);
+
+TRACE_EVENT(ext4_da_update_reserve_space,
+ TP_PROTO(struct inode *inode, int used_blocks),
+
+ TP_ARGS(inode, used_blocks),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( umode_t, mode )
+ __field( __u64, i_blocks )
+ __field( int, used_blocks )
+ __field( int, reserved_data_blocks )
+ __field( int, reserved_meta_blocks )
+ __field( int, allocated_meta_blocks )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
+ __entry->i_blocks = inode->i_blocks;
+ __entry->used_blocks = used_blocks;
+ __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+ __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
+ __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
+ ),
+
+ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d "
+ "reserved_data_blocks %d reserved_meta_blocks %d "
+ "allocated_meta_blocks %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->mode, __entry->i_blocks,
+ __entry->used_blocks, __entry->reserved_data_blocks,
+ __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
+);
+
+TRACE_EVENT(ext4_da_reserve_space,
+ TP_PROTO(struct inode *inode, int md_needed),
+
+ TP_ARGS(inode, md_needed),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( umode_t, mode )
+ __field( __u64, i_blocks )
+ __field( int, md_needed )
+ __field( int, reserved_data_blocks )
+ __field( int, reserved_meta_blocks )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
+ __entry->i_blocks = inode->i_blocks;
+ __entry->md_needed = md_needed;
+ __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+ __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
+ ),
+
+ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d "
+ "reserved_data_blocks %d reserved_meta_blocks %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->mode, __entry->i_blocks,
+ __entry->md_needed, __entry->reserved_data_blocks,
+ __entry->reserved_meta_blocks)
+);
+
+TRACE_EVENT(ext4_da_release_space,
+ TP_PROTO(struct inode *inode, int freed_blocks),
+
+ TP_ARGS(inode, freed_blocks),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( umode_t, mode )
+ __field( __u64, i_blocks )
+ __field( int, freed_blocks )
+ __field( int, reserved_data_blocks )
+ __field( int, reserved_meta_blocks )
+ __field( int, allocated_meta_blocks )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
+ __entry->i_blocks = inode->i_blocks;
+ __entry->freed_blocks = freed_blocks;
+ __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+ __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
+ __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
+ ),
+
+ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d "
+ "reserved_data_blocks %d reserved_meta_blocks %d "
+ "allocated_meta_blocks %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->mode, __entry->i_blocks,
+ __entry->freed_blocks, __entry->reserved_data_blocks,
+ __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
+);
+
+DECLARE_EVENT_CLASS(ext4__bitmap_load,
+ TP_PROTO(struct super_block *sb, unsigned long group),
+
+ TP_ARGS(sb, group),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( __u32, group )
+	),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->group = group;
+ ),
+
+ TP_printk("dev %d,%d group %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->group)
+);
+
+DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load,
+
+ TP_PROTO(struct super_block *sb, unsigned long group),
+
+ TP_ARGS(sb, group)
+);
+
+DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load,
+
+ TP_PROTO(struct super_block *sb, unsigned long group),
+
+ TP_ARGS(sb, group)
+);
+
+DEFINE_EVENT(ext4__bitmap_load, ext4_read_block_bitmap_load,
+
+ TP_PROTO(struct super_block *sb, unsigned long group),
+
+ TP_ARGS(sb, group)
+);
+
+DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap,
+
+ TP_PROTO(struct super_block *sb, unsigned long group),
+
+ TP_ARGS(sb, group)
+);
+
+TRACE_EVENT(ext4_direct_IO_enter,
+ TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
+
+ TP_ARGS(inode, offset, len, rw),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( loff_t, pos )
+ __field( unsigned long, len )
+ __field( int, rw )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->pos = offset;
+ __entry->len = len;
+ __entry->rw = rw;
+ ),
+
+ TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->pos, __entry->len, __entry->rw)
+);
+
+TRACE_EVENT(ext4_direct_IO_exit,
+ TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
+ int rw, int ret),
+
+ TP_ARGS(inode, offset, len, rw, ret),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( loff_t, pos )
+ __field( unsigned long, len )
+ __field( int, rw )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->pos = offset;
+ __entry->len = len;
+ __entry->rw = rw;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->pos, __entry->len,
+ __entry->rw, __entry->ret)
+);
+
+TRACE_EVENT(ext4_fallocate_enter,
+ TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
+
+ TP_ARGS(inode, offset, len, mode),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( loff_t, pos )
+ __field( loff_t, len )
+ __field( int, mode )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->pos = offset;
+ __entry->len = len;
+ __entry->mode = mode;
+ ),
+
+ TP_printk("dev %d,%d ino %lu pos %lld len %lld mode %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->pos,
+ __entry->len, __entry->mode)
+);
+
+TRACE_EVENT(ext4_fallocate_exit,
+ TP_PROTO(struct inode *inode, loff_t offset,
+ unsigned int max_blocks, int ret),
+
+ TP_ARGS(inode, offset, max_blocks, ret),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( loff_t, pos )
+ __field( unsigned int, blocks )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->pos = offset;
+ __entry->blocks = max_blocks;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d ino %lu pos %lld blocks %u ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->pos, __entry->blocks,
+ __entry->ret)
+);
+
+TRACE_EVENT(ext4_unlink_enter,
+ TP_PROTO(struct inode *parent, struct dentry *dentry),
+
+ TP_ARGS(parent, dentry),
+
+ TP_STRUCT__entry(
+ __field( ino_t, parent )
+ __field( ino_t, ino )
+ __field( loff_t, size )
+ __field( dev_t, dev )
+ ),
+
+ TP_fast_assign(
+ __entry->parent = parent->i_ino;
+ __entry->ino = dentry->d_inode->i_ino;
+ __entry->size = dentry->d_inode->i_size;
+ __entry->dev = dentry->d_inode->i_sb->s_dev;
+ ),
+
+ TP_printk("dev %d,%d ino %lu size %lld parent %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->size,
+ (unsigned long) __entry->parent)
+);
+
+TRACE_EVENT(ext4_unlink_exit,
+ TP_PROTO(struct dentry *dentry, int ret),
+
+ TP_ARGS(dentry, ret),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = dentry->d_inode->i_ino;
+ __entry->dev = dentry->d_inode->i_sb->s_dev;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d ino %lu ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->ret)
+);
+
+DECLARE_EVENT_CLASS(ext4__truncate,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( __u64, blocks )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->blocks = inode->i_blocks;
+ ),
+
+ TP_printk("dev %d,%d ino %lu blocks %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->blocks)
+);
+
+DEFINE_EVENT(ext4__truncate, ext4_truncate_enter,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+DEFINE_EVENT(ext4__truncate, ext4_truncate_exit,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
+ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+ unsigned int len, unsigned int flags),
+
+ TP_ARGS(inode, lblk, len, flags),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, lblk )
+ __field( unsigned int, len )
+ __field( unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->lblk = lblk;
+ __entry->len = len;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->lblk, __entry->len, __entry->flags)
+);
+
+DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter,
+ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+ unsigned len, unsigned flags),
+
+ TP_ARGS(inode, lblk, len, flags)
+);
+
+DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter,
+ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+ unsigned len, unsigned flags),
+
+ TP_ARGS(inode, lblk, len, flags)
+);
+
+DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
+ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+ ext4_fsblk_t pblk, unsigned int len, int ret),
+
+ TP_ARGS(inode, lblk, pblk, len, ret),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, lblk )
+ __field( ext4_fsblk_t, pblk )
+ __field( unsigned int, len )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->lblk = lblk;
+ __entry->pblk = pblk;
+ __entry->len = len;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->lblk, __entry->pblk,
+ __entry->len, __entry->ret)
+);
+
+DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit,
+ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+ ext4_fsblk_t pblk, unsigned len, int ret),
+
+ TP_ARGS(inode, lblk, pblk, len, ret)
+);
+
+DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit,
+ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
+ ext4_fsblk_t pblk, unsigned len, int ret),
+
+ TP_ARGS(inode, lblk, pblk, len, ret)
+);
+
+TRACE_EVENT(ext4_ext_load_extent,
+ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk),
+
+ TP_ARGS(inode, lblk, pblk),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ext4_lblk_t, lblk )
+ __field( ext4_fsblk_t, pblk )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->lblk = lblk;
+ __entry->pblk = pblk;
+ ),
+
+ TP_printk("dev %d,%d ino %lu lblk %u pblk %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->lblk, __entry->pblk)
+);
+
+TRACE_EVENT(ext4_load_inode,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( ino_t, ino )
+ __field( dev_t, dev )
+ ),
+
+ TP_fast_assign(
+ __entry->ino = inode->i_ino;
+ __entry->dev = inode->i_sb->s_dev;
+ ),
+
+	TP_printk("dev %d,%d ino %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino)
+);
+
+#endif /* _TRACE_EXT4_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h
new file mode 100644
index 00000000..9fe3a366
--- /dev/null
+++ b/include/trace/events/gfpflags.h
@@ -0,0 +1,41 @@
+/*
+ * The order of these masks is important. Matching masks will be seen
+ * first and the leftover flags will end up showing by themselves.
+ *
+ * For example, if we have GFP_KERNEL before GFP_USER we will get:
+ *
+ * GFP_KERNEL|GFP_HARDWALL
+ *
+ * Thus the masks with the most bits set go first.
+ */
+#define show_gfp_flags(flags) \
+ (flags) ? __print_flags(flags, "|", \
+ {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
+ {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
+ {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
+ {(unsigned long)GFP_USER, "GFP_USER"}, \
+ {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \
+ {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
+ {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
+ {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
+ {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
+ {(unsigned long)__GFP_HIGH, "GFP_HIGH"}, \
+ {(unsigned long)__GFP_WAIT, "GFP_WAIT"}, \
+ {(unsigned long)__GFP_IO, "GFP_IO"}, \
+ {(unsigned long)__GFP_COLD, "GFP_COLD"}, \
+ {(unsigned long)__GFP_NOWARN, "GFP_NOWARN"}, \
+ {(unsigned long)__GFP_REPEAT, "GFP_REPEAT"}, \
+ {(unsigned long)__GFP_NOFAIL, "GFP_NOFAIL"}, \
+ {(unsigned long)__GFP_NORETRY, "GFP_NORETRY"}, \
+ {(unsigned long)__GFP_COMP, "GFP_COMP"}, \
+ {(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \
+ {(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \
+ {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \
+ {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \
+ {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
+ {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \
+ {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \
+ {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \
+ {(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \
+ ) : "GFP_NOWAIT"
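+
+/*
+ * For example (illustrative): __print_flags() matches each mask in
+ * order and strips the matched bits, so a value such as
+ * GFP_KERNEL | __GFP_NOWARN, where GFP_KERNEL is
+ * (__GFP_WAIT | __GFP_IO | __GFP_FS), is rendered as
+ * "GFP_KERNEL|GFP_NOWARN" rather than as four separate low-level bits.
+ */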
+
diff --git a/include/trace/events/gpio.h b/include/trace/events/gpio.h
new file mode 100644
index 00000000..927a8ad9
--- /dev/null
+++ b/include/trace/events/gpio.h
@@ -0,0 +1,56 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpio
+
+#if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_GPIO_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(gpio_direction,
+
+ TP_PROTO(unsigned gpio, int in, int err),
+
+ TP_ARGS(gpio, in, err),
+
+ TP_STRUCT__entry(
+ __field(unsigned, gpio)
+ __field(int, in)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->gpio = gpio;
+ __entry->in = in;
+ __entry->err = err;
+ ),
+
+ TP_printk("%u %3s (%d)", __entry->gpio,
+ __entry->in ? "in" : "out", __entry->err)
+);
+
+TRACE_EVENT(gpio_value,
+
+ TP_PROTO(unsigned gpio, int get, int value),
+
+ TP_ARGS(gpio, get, value),
+
+ TP_STRUCT__entry(
+ __field(unsigned, gpio)
+ __field(int, get)
+ __field(int, value)
+ ),
+
+ TP_fast_assign(
+ __entry->gpio = gpio;
+ __entry->get = get;
+ __entry->value = value;
+ ),
+
+ TP_printk("%u %3s %d", __entry->gpio,
+ __entry->get ? "get" : "set", __entry->value)
+);
+
+#endif /* if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
new file mode 100644
index 00000000..1c09820d
--- /dev/null
+++ b/include/trace/events/irq.h
@@ -0,0 +1,150 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM irq
+
+#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IRQ_H
+
+#include <linux/tracepoint.h>
+
+struct irqaction;
+struct softirq_action;
+
+#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
+#define show_softirq_name(val) \
+ __print_symbolic(val, \
+ softirq_name(HI), \
+ softirq_name(TIMER), \
+ softirq_name(NET_TX), \
+ softirq_name(NET_RX), \
+ softirq_name(BLOCK), \
+ softirq_name(BLOCK_IOPOLL), \
+ softirq_name(TASKLET), \
+ softirq_name(SCHED), \
+ softirq_name(HRTIMER), \
+ softirq_name(RCU))
+
+/**
+ * irq_handler_entry - called immediately before the irq action handler
+ * @irq: irq number
+ * @action: pointer to struct irqaction
+ *
+ * The struct irqaction pointed to by @action contains various
+ * information about the handler, including the device name,
+ * @action->name, and the device id, @action->dev_id. When used in
+ * conjunction with the irq_handler_exit tracepoint, we can figure
+ * out irq handler latencies.
+ */
+TRACE_EVENT(irq_handler_entry,
+
+ TP_PROTO(int irq, struct irqaction *action),
+
+ TP_ARGS(irq, action),
+
+ TP_STRUCT__entry(
+ __field( int, irq )
+ __string( name, action->name )
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __assign_str(name, action->name);
+ ),
+
+ TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
+);
+
+/**
+ * irq_handler_exit - called immediately after the irq action handler returns
+ * @irq: irq number
+ * @action: pointer to struct irqaction
+ * @ret: return value
+ *
+ * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
+ * @action->handler successfully handled this irq. Otherwise, the irq might be
+ * a shared irq line, or the irq was not handled successfully. Can be used in
+ * conjunction with the irq_handler_entry tracepoint to understand irq handler
+ * latencies.
+ */
+TRACE_EVENT(irq_handler_exit,
+
+ TP_PROTO(int irq, struct irqaction *action, int ret),
+
+ TP_ARGS(irq, action, ret),
+
+ TP_STRUCT__entry(
+ __field( int, irq )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("irq=%d ret=%s",
+ __entry->irq, __entry->ret ? "handled" : "unhandled")
+);
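+
+/*
+ * Minimal usage sketch (illustrative, not part of the original header;
+ * assumes this kernel's probe API, where probes take a leading
+ * void *data argument). A module can attach probes to both tracepoints
+ * and derive the handler latency from the timestamp delta:
+ *
+ *	#include <linux/trace_clock.h>
+ *
+ *	static u64 t_entry;
+ *
+ *	static void probe_entry(void *data, int irq, struct irqaction *action)
+ *	{
+ *		t_entry = trace_clock_local();
+ *	}
+ *
+ *	static void probe_exit(void *data, int irq, struct irqaction *action,
+ *			       int ret)
+ *	{
+ *		pr_debug("irq %d handler took %llu ns\n", irq,
+ *			 trace_clock_local() - t_entry);
+ *	}
+ *
+ *	register_trace_irq_handler_entry(probe_entry, NULL);
+ *	register_trace_irq_handler_exit(probe_exit, NULL);
+ */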
+
+DECLARE_EVENT_CLASS(softirq,
+
+ TP_PROTO(unsigned int vec_nr),
+
+ TP_ARGS(vec_nr),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, vec )
+ ),
+
+ TP_fast_assign(
+ __entry->vec = vec_nr;
+ ),
+
+ TP_printk("vec=%u [action=%s]", __entry->vec,
+ show_softirq_name(__entry->vec))
+);
+
+/**
+ * softirq_entry - called immediately before the softirq handler
+ * @vec_nr: softirq vector number
+ *
+ * When used in combination with the softirq_exit tracepoint
+ * we can determine the softirq handler runtime.
+ */
+DEFINE_EVENT(softirq, softirq_entry,
+
+ TP_PROTO(unsigned int vec_nr),
+
+ TP_ARGS(vec_nr)
+);
+
+/**
+ * softirq_exit - called immediately after the softirq handler returns
+ * @vec_nr: softirq vector number
+ *
+ * When used in combination with the softirq_entry tracepoint
+ * we can determine the softirq handler runtime.
+ */
+DEFINE_EVENT(softirq, softirq_exit,
+
+ TP_PROTO(unsigned int vec_nr),
+
+ TP_ARGS(vec_nr)
+);
+
+/**
+ * softirq_raise - called immediately when a softirq is raised
+ * @vec_nr: softirq vector number
+ *
+ * When used in combination with the softirq_entry tracepoint
+ * we can determine the latency from raising a softirq to running its handler.
+ */
+DEFINE_EVENT(softirq, softirq_raise,
+
+ TP_PROTO(unsigned int vec_nr),
+
+ TP_ARGS(vec_nr)
+);
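+
+/*
+ * For example (illustrative): a softirq_entry event for the NET_RX
+ * vector (3) is rendered as "vec=3 [action=NET_RX]", and the
+ * raise-to-run latency is the timestamp delta between the
+ * softirq_raise and softirq_entry events for the same vector on the
+ * same CPU.
+ */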
+
+#endif /* _TRACE_IRQ_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
new file mode 100644
index 00000000..bf16545c
--- /dev/null
+++ b/include/trace/events/jbd2.h
@@ -0,0 +1,233 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM jbd2
+
+#if !defined(_TRACE_JBD2_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_JBD2_H
+
+#include <linux/jbd2.h>
+#include <linux/tracepoint.h>
+
+struct transaction_chp_stats_s;
+struct transaction_run_stats_s;
+
+TRACE_EVENT(jbd2_checkpoint,
+
+ TP_PROTO(journal_t *journal, int result),
+
+ TP_ARGS(journal, result),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( int, result )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->result = result;
+ ),
+
+ TP_printk("dev %s result %d",
+ jbd2_dev_to_name(__entry->dev), __entry->result)
+);
+
+DECLARE_EVENT_CLASS(jbd2_commit,
+
+ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
+
+ TP_ARGS(journal, commit_transaction),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( char, sync_commit )
+ __field( int, transaction )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->sync_commit = commit_transaction->t_synchronous_commit;
+ __entry->transaction = commit_transaction->t_tid;
+ ),
+
+ TP_printk("dev %s transaction %d sync %d",
+ jbd2_dev_to_name(__entry->dev), __entry->transaction,
+ __entry->sync_commit)
+);
+
+DEFINE_EVENT(jbd2_commit, jbd2_start_commit,
+
+ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
+
+ TP_ARGS(journal, commit_transaction)
+);
+
+DEFINE_EVENT(jbd2_commit, jbd2_commit_locking,
+
+ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
+
+ TP_ARGS(journal, commit_transaction)
+);
+
+DEFINE_EVENT(jbd2_commit, jbd2_commit_flushing,
+
+ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
+
+ TP_ARGS(journal, commit_transaction)
+);
+
+DEFINE_EVENT(jbd2_commit, jbd2_commit_logging,
+
+ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
+
+ TP_ARGS(journal, commit_transaction)
+);
+
+TRACE_EVENT(jbd2_end_commit,
+ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
+
+ TP_ARGS(journal, commit_transaction),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( char, sync_commit )
+ __field( int, transaction )
+ __field( int, head )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->sync_commit = commit_transaction->t_synchronous_commit;
+ __entry->transaction = commit_transaction->t_tid;
+ __entry->head = journal->j_tail_sequence;
+ ),
+
+ TP_printk("dev %s transaction %d sync %d head %d",
+ jbd2_dev_to_name(__entry->dev), __entry->transaction,
+ __entry->sync_commit, __entry->head)
+);
+
+TRACE_EVENT(jbd2_submit_inode_data,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ ),
+
+ TP_printk("dev %s ino %lu",
+ jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino)
+);
+
+TRACE_EVENT(jbd2_run_stats,
+ TP_PROTO(dev_t dev, unsigned long tid,
+ struct transaction_run_stats_s *stats),
+
+ TP_ARGS(dev, tid, stats),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( unsigned long, tid )
+ __field( unsigned long, wait )
+ __field( unsigned long, running )
+ __field( unsigned long, locked )
+ __field( unsigned long, flushing )
+ __field( unsigned long, logging )
+ __field( __u32, handle_count )
+ __field( __u32, blocks )
+ __field( __u32, blocks_logged )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->tid = tid;
+ __entry->wait = stats->rs_wait;
+ __entry->running = stats->rs_running;
+ __entry->locked = stats->rs_locked;
+ __entry->flushing = stats->rs_flushing;
+ __entry->logging = stats->rs_logging;
+ __entry->handle_count = stats->rs_handle_count;
+ __entry->blocks = stats->rs_blocks;
+ __entry->blocks_logged = stats->rs_blocks_logged;
+ ),
+
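+	/* the interval fields hold jiffies; TP_printk reports them in
+	 * milliseconds via jiffies_to_msecs() */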
+ TP_printk("dev %s tid %lu wait %u running %u locked %u flushing %u "
+ "logging %u handle_count %u blocks %u blocks_logged %u",
+ jbd2_dev_to_name(__entry->dev), __entry->tid,
+ jiffies_to_msecs(__entry->wait),
+ jiffies_to_msecs(__entry->running),
+ jiffies_to_msecs(__entry->locked),
+ jiffies_to_msecs(__entry->flushing),
+ jiffies_to_msecs(__entry->logging),
+ __entry->handle_count, __entry->blocks,
+ __entry->blocks_logged)
+);
+
+TRACE_EVENT(jbd2_checkpoint_stats,
+ TP_PROTO(dev_t dev, unsigned long tid,
+ struct transaction_chp_stats_s *stats),
+
+ TP_ARGS(dev, tid, stats),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( unsigned long, tid )
+ __field( unsigned long, chp_time )
+ __field( __u32, forced_to_close )
+ __field( __u32, written )
+ __field( __u32, dropped )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->tid = tid;
+ __entry->chp_time = stats->cs_chp_time;
+		__entry->forced_to_close = stats->cs_forced_to_close;
+ __entry->written = stats->cs_written;
+ __entry->dropped = stats->cs_dropped;
+ ),
+
+ TP_printk("dev %s tid %lu chp_time %u forced_to_close %u "
+ "written %u dropped %u",
+ jbd2_dev_to_name(__entry->dev), __entry->tid,
+ jiffies_to_msecs(__entry->chp_time),
+ __entry->forced_to_close, __entry->written, __entry->dropped)
+);
+
+TRACE_EVENT(jbd2_cleanup_journal_tail,
+
+ TP_PROTO(journal_t *journal, tid_t first_tid,
+ unsigned long block_nr, unsigned long freed),
+
+ TP_ARGS(journal, first_tid, block_nr, freed),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( tid_t, tail_sequence )
+ __field( tid_t, first_tid )
+ __field(unsigned long, block_nr )
+ __field(unsigned long, freed )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->tail_sequence = journal->j_tail_sequence;
+ __entry->first_tid = first_tid;
+ __entry->block_nr = block_nr;
+ __entry->freed = freed;
+ ),
+
+ TP_printk("dev %s from %u to %u offset %lu freed %lu",
+ jbd2_dev_to_name(__entry->dev), __entry->tail_sequence,
+ __entry->first_tid, __entry->block_nr, __entry->freed)
+);
+
+#endif /* _TRACE_JBD2_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
new file mode 100644
index 00000000..a9c87ad8
--- /dev/null
+++ b/include/trace/events/kmem.h
@@ -0,0 +1,308 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kmem
+
+#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KMEM_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include "gfpflags.h"
+
+DECLARE_EVENT_CLASS(kmem_alloc,
+
+ TP_PROTO(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags),
+
+ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, call_site )
+ __field( const void *, ptr )
+ __field( size_t, bytes_req )
+ __field( size_t, bytes_alloc )
+ __field( gfp_t, gfp_flags )
+ ),
+
+ TP_fast_assign(
+ __entry->call_site = call_site;
+ __entry->ptr = ptr;
+ __entry->bytes_req = bytes_req;
+ __entry->bytes_alloc = bytes_alloc;
+ __entry->gfp_flags = gfp_flags;
+ ),
+
+ TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
+ __entry->call_site,
+ __entry->ptr,
+ __entry->bytes_req,
+ __entry->bytes_alloc,
+ show_gfp_flags(__entry->gfp_flags))
+);
+
+DEFINE_EVENT(kmem_alloc, kmalloc,
+
+ TP_PROTO(unsigned long call_site, const void *ptr,
+ size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
+
+ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+);
+
+DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
+
+ TP_PROTO(unsigned long call_site, const void *ptr,
+ size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
+
+ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+);
+
+DECLARE_EVENT_CLASS(kmem_alloc_node,
+
+ TP_PROTO(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags,
+ int node),
+
+ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, call_site )
+ __field( const void *, ptr )
+ __field( size_t, bytes_req )
+ __field( size_t, bytes_alloc )
+ __field( gfp_t, gfp_flags )
+ __field( int, node )
+ ),
+
+ TP_fast_assign(
+ __entry->call_site = call_site;
+ __entry->ptr = ptr;
+ __entry->bytes_req = bytes_req;
+ __entry->bytes_alloc = bytes_alloc;
+ __entry->gfp_flags = gfp_flags;
+ __entry->node = node;
+ ),
+
+ TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+ __entry->call_site,
+ __entry->ptr,
+ __entry->bytes_req,
+ __entry->bytes_alloc,
+ show_gfp_flags(__entry->gfp_flags),
+ __entry->node)
+);
+
+DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
+
+ TP_PROTO(unsigned long call_site, const void *ptr,
+ size_t bytes_req, size_t bytes_alloc,
+ gfp_t gfp_flags, int node),
+
+ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+);
+
+DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
+
+ TP_PROTO(unsigned long call_site, const void *ptr,
+ size_t bytes_req, size_t bytes_alloc,
+ gfp_t gfp_flags, int node),
+
+ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+);
+
+DECLARE_EVENT_CLASS(kmem_free,
+
+ TP_PROTO(unsigned long call_site, const void *ptr),
+
+ TP_ARGS(call_site, ptr),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, call_site )
+ __field( const void *, ptr )
+ ),
+
+ TP_fast_assign(
+ __entry->call_site = call_site;
+ __entry->ptr = ptr;
+ ),
+
+ TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
+);
+
+DEFINE_EVENT(kmem_free, kfree,
+
+ TP_PROTO(unsigned long call_site, const void *ptr),
+
+ TP_ARGS(call_site, ptr)
+);
+
+DEFINE_EVENT(kmem_free, kmem_cache_free,
+
+ TP_PROTO(unsigned long call_site, const void *ptr),
+
+ TP_ARGS(call_site, ptr)
+);
+
+TRACE_EVENT(mm_page_free_direct,
+
+ TP_PROTO(struct page *page, unsigned int order),
+
+ TP_ARGS(page, order),
+
+ TP_STRUCT__entry(
+ __field( struct page *, page )
+ __field( unsigned int, order )
+ ),
+
+ TP_fast_assign(
+ __entry->page = page;
+ __entry->order = order;
+ ),
+
+ TP_printk("page=%p pfn=%lu order=%d",
+ __entry->page,
+ page_to_pfn(__entry->page),
+ __entry->order)
+);
+
+TRACE_EVENT(mm_pagevec_free,
+
+ TP_PROTO(struct page *page, int cold),
+
+ TP_ARGS(page, cold),
+
+ TP_STRUCT__entry(
+ __field( struct page *, page )
+ __field( int, cold )
+ ),
+
+ TP_fast_assign(
+ __entry->page = page;
+ __entry->cold = cold;
+ ),
+
+ TP_printk("page=%p pfn=%lu order=0 cold=%d",
+ __entry->page,
+ page_to_pfn(__entry->page),
+ __entry->cold)
+);
+
+TRACE_EVENT(mm_page_alloc,
+
+ TP_PROTO(struct page *page, unsigned int order,
+ gfp_t gfp_flags, int migratetype),
+
+ TP_ARGS(page, order, gfp_flags, migratetype),
+
+ TP_STRUCT__entry(
+ __field( struct page *, page )
+ __field( unsigned int, order )
+ __field( gfp_t, gfp_flags )
+ __field( int, migratetype )
+ ),
+
+ TP_fast_assign(
+ __entry->page = page;
+ __entry->order = order;
+ __entry->gfp_flags = gfp_flags;
+ __entry->migratetype = migratetype;
+ ),
+
+ TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
+ __entry->page,
+ page_to_pfn(__entry->page),
+ __entry->order,
+ __entry->migratetype,
+ show_gfp_flags(__entry->gfp_flags))
+);
+
+DECLARE_EVENT_CLASS(mm_page,
+
+ TP_PROTO(struct page *page, unsigned int order, int migratetype),
+
+ TP_ARGS(page, order, migratetype),
+
+ TP_STRUCT__entry(
+ __field( struct page *, page )
+ __field( unsigned int, order )
+ __field( int, migratetype )
+ ),
+
+ TP_fast_assign(
+ __entry->page = page;
+ __entry->order = order;
+ __entry->migratetype = migratetype;
+ ),
+
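+	/* order == 0 is reported as percpu_refill: order-0 pages taken
+	 * under the zone lock refill the per-cpu lists */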
+ TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
+ __entry->page,
+ page_to_pfn(__entry->page),
+ __entry->order,
+ __entry->migratetype,
+ __entry->order == 0)
+);
+
+DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
+
+ TP_PROTO(struct page *page, unsigned int order, int migratetype),
+
+ TP_ARGS(page, order, migratetype)
+);
+
+DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
+
+ TP_PROTO(struct page *page, unsigned int order, int migratetype),
+
+ TP_ARGS(page, order, migratetype),
+
+ TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
+ __entry->page, page_to_pfn(__entry->page),
+ __entry->order, __entry->migratetype)
+);
+
+TRACE_EVENT(mm_page_alloc_extfrag,
+
+ TP_PROTO(struct page *page,
+ int alloc_order, int fallback_order,
+ int alloc_migratetype, int fallback_migratetype),
+
+ TP_ARGS(page,
+ alloc_order, fallback_order,
+ alloc_migratetype, fallback_migratetype),
+
+ TP_STRUCT__entry(
+ __field( struct page *, page )
+ __field( int, alloc_order )
+ __field( int, fallback_order )
+ __field( int, alloc_migratetype )
+ __field( int, fallback_migratetype )
+ ),
+
+ TP_fast_assign(
+ __entry->page = page;
+ __entry->alloc_order = alloc_order;
+ __entry->fallback_order = fallback_order;
+ __entry->alloc_migratetype = alloc_migratetype;
+ __entry->fallback_migratetype = fallback_migratetype;
+ ),
+
+ TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
+ __entry->page,
+ page_to_pfn(__entry->page),
+ __entry->alloc_order,
+ __entry->fallback_order,
+ pageblock_order,
+ __entry->alloc_migratetype,
+ __entry->fallback_migratetype,
+ __entry->fallback_order < pageblock_order,
+ __entry->alloc_migratetype == __entry->fallback_migratetype)
+);
+
+#endif /* _TRACE_KMEM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
new file mode 100644
index 00000000..46e3cd8e
--- /dev/null
+++ b/include/trace/events/kvm.h
@@ -0,0 +1,312 @@
+#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_MAIN_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+
+#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
+
+#define kvm_trace_exit_reason \
+ ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
+ ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
+ ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
+ ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
+ ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
+
+TRACE_EVENT(kvm_userspace_exit,
+ TP_PROTO(__u32 reason, int errno),
+ TP_ARGS(reason, errno),
+
+ TP_STRUCT__entry(
+ __field( __u32, reason )
+ __field( int, errno )
+ ),
+
+ TP_fast_assign(
+ __entry->reason = reason;
+ __entry->errno = errno;
+ ),
+
+ TP_printk("reason %s (%d)",
+ __entry->errno < 0 ?
+ (__entry->errno == -EINTR ? "restart" : "error") :
+ __print_symbolic(__entry->reason, kvm_trace_exit_reason),
+ __entry->errno < 0 ? -__entry->errno : __entry->reason)
+);
+
+#if defined(__KVM_HAVE_IOAPIC)
+TRACE_EVENT(kvm_set_irq,
+ TP_PROTO(unsigned int gsi, int level, int irq_source_id),
+ TP_ARGS(gsi, level, irq_source_id),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, gsi )
+ __field( int, level )
+ __field( int, irq_source_id )
+ ),
+
+ TP_fast_assign(
+ __entry->gsi = gsi;
+ __entry->level = level;
+ __entry->irq_source_id = irq_source_id;
+ ),
+
+ TP_printk("gsi %u level %d source %d",
+ __entry->gsi, __entry->level, __entry->irq_source_id)
+);
+
+#define kvm_deliver_mode \
+ {0x0, "Fixed"}, \
+ {0x1, "LowPrio"}, \
+ {0x2, "SMI"}, \
+ {0x3, "Res3"}, \
+ {0x4, "NMI"}, \
+ {0x5, "INIT"}, \
+ {0x6, "SIPI"}, \
+ {0x7, "ExtINT"}
+
+TRACE_EVENT(kvm_ioapic_set_irq,
+ TP_PROTO(__u64 e, int pin, bool coalesced),
+ TP_ARGS(e, pin, coalesced),
+
+ TP_STRUCT__entry(
+ __field( __u64, e )
+ __field( int, pin )
+ __field( bool, coalesced )
+ ),
+
+ TP_fast_assign(
+ __entry->e = e;
+ __entry->pin = pin;
+ __entry->coalesced = coalesced;
+ ),
+
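+	/* decode the IOAPIC redirection entry: dest[63:56], vector[7:0],
+	 * delivery mode[10:8], dest mode bit 11, trigger mode bit 15,
+	 * mask bit 16 */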
+ TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
+ __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
+ __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
+ (__entry->e & (1<<11)) ? "logical" : "physical",
+ (__entry->e & (1<<15)) ? "level" : "edge",
+ (__entry->e & (1<<16)) ? "|masked" : "",
+ __entry->coalesced ? " (coalesced)" : "")
+);
+
+TRACE_EVENT(kvm_msi_set_irq,
+ TP_PROTO(__u64 address, __u64 data),
+ TP_ARGS(address, data),
+
+ TP_STRUCT__entry(
+ __field( __u64, address )
+ __field( __u64, data )
+ ),
+
+ TP_fast_assign(
+ __entry->address = address;
+ __entry->data = data;
+ ),
+
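+	/* decode the MSI message: dest id addr[19:12], vector data[7:0],
+	 * delivery mode data[10:8], dest mode addr bit 2, redirection
+	 * hint addr bit 3, trigger mode data bit 15 */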
+ TP_printk("dst %u vec %x (%s|%s|%s%s)",
+ (u8)(__entry->address >> 12), (u8)__entry->data,
+ __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
+ (__entry->address & (1<<2)) ? "logical" : "physical",
+ (__entry->data & (1<<15)) ? "level" : "edge",
+ (__entry->address & (1<<3)) ? "|rh" : "")
+);
+
+#define kvm_irqchips \
+ {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
+ {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
+ {KVM_IRQCHIP_IOAPIC, "IOAPIC"}
+
+TRACE_EVENT(kvm_ack_irq,
+ TP_PROTO(unsigned int irqchip, unsigned int pin),
+ TP_ARGS(irqchip, pin),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, irqchip )
+ __field( unsigned int, pin )
+ ),
+
+ TP_fast_assign(
+ __entry->irqchip = irqchip;
+ __entry->pin = pin;
+ ),
+
+ TP_printk("irqchip %s pin %u",
+ __print_symbolic(__entry->irqchip, kvm_irqchips),
+ __entry->pin)
+);
+
+
+#endif /* defined(__KVM_HAVE_IOAPIC) */
+
+#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
+#define KVM_TRACE_MMIO_READ 1
+#define KVM_TRACE_MMIO_WRITE 2
+
+#define kvm_trace_symbol_mmio \
+ { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
+ { KVM_TRACE_MMIO_READ, "read" }, \
+ { KVM_TRACE_MMIO_WRITE, "write" }
+
+TRACE_EVENT(kvm_mmio,
+ TP_PROTO(int type, int len, u64 gpa, u64 val),
+ TP_ARGS(type, len, gpa, val),
+
+ TP_STRUCT__entry(
+ __field( u32, type )
+ __field( u32, len )
+ __field( u64, gpa )
+ __field( u64, val )
+ ),
+
+ TP_fast_assign(
+ __entry->type = type;
+ __entry->len = len;
+ __entry->gpa = gpa;
+ __entry->val = val;
+ ),
+
+ TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
+ __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
+ __entry->len, __entry->gpa, __entry->val)
+);
+
+#define kvm_fpu_load_symbol \
+ {0, "unload"}, \
+ {1, "load"}
+
+TRACE_EVENT(kvm_fpu,
+ TP_PROTO(int load),
+ TP_ARGS(load),
+
+ TP_STRUCT__entry(
+ __field( u32, load )
+ ),
+
+ TP_fast_assign(
+ __entry->load = load;
+ ),
+
+ TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
+);
+
+TRACE_EVENT(kvm_age_page,
+ TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
+ TP_ARGS(hva, slot, ref),
+
+ TP_STRUCT__entry(
+ __field( u64, hva )
+ __field( u64, gfn )
+ __field( u8, referenced )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ __entry->gfn =
+ slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
+ __entry->referenced = ref;
+ ),
+
+ TP_printk("hva %llx gfn %llx %s",
+ __entry->hva, __entry->gfn,
+ __entry->referenced ? "YOUNG" : "OLD")
+);
+
+#ifdef CONFIG_KVM_ASYNC_PF
+DECLARE_EVENT_CLASS(kvm_async_get_page_class,
+
+ TP_PROTO(u64 gva, u64 gfn),
+
+ TP_ARGS(gva, gfn),
+
+ TP_STRUCT__entry(
+ __field(__u64, gva)
+ __field(u64, gfn)
+ ),
+
+ TP_fast_assign(
+ __entry->gva = gva;
+ __entry->gfn = gfn;
+ ),
+
+ TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
+
+ TP_PROTO(u64 gva, u64 gfn),
+
+ TP_ARGS(gva, gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
+
+ TP_PROTO(u64 gva, u64 gfn),
+
+ TP_ARGS(gva, gfn)
+);
+
+DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
+
+ TP_PROTO(u64 token, u64 gva),
+
+ TP_ARGS(token, gva),
+
+ TP_STRUCT__entry(
+ __field(__u64, token)
+ __field(__u64, gva)
+ ),
+
+ TP_fast_assign(
+ __entry->token = token;
+ __entry->gva = gva;
+ ),
+
+ TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
+
+);
+
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
+
+ TP_PROTO(u64 token, u64 gva),
+
+ TP_ARGS(token, gva)
+);
+
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
+
+ TP_PROTO(u64 token, u64 gva),
+
+ TP_ARGS(token, gva)
+);
+
+TRACE_EVENT(
+ kvm_async_pf_completed,
+ TP_PROTO(unsigned long address, struct page *page, u64 gva),
+ TP_ARGS(address, page, gva),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, address)
+ __field(pfn_t, pfn)
+ __field(u64, gva)
+ ),
+
+ TP_fast_assign(
+ __entry->address = address;
+ __entry->pfn = page ? page_to_pfn(page) : 0;
+ __entry->gva = gva;
+ ),
+
+ TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
+ __entry->address, __entry->pfn)
+);
+
+#endif /* CONFIG_KVM_ASYNC_PF */
+
+#endif /* _TRACE_KVM_MAIN_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
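Note on the kvm_msi_set_irq event above: its TP_printk decodes the MSI address/data pair inline — destination ID in address bits 19:12, vector in data bits 7:0, delivery mode in data bits 10:8, plus the logical/physical (address bit 2), level/edge (data bit 15) and redirection-hint (address bit 3) flags. For illustration only (not part of the patch), a minimal userspace C sketch of the same extraction; the delivery-mode names are assumed to match the kvm_deliver_mode table and the sample pair is invented:

#include <stdio.h>
#include <stdint.h>

/* Decode an MSI address/data pair the way kvm_msi_set_irq's TP_printk
 * does. Field positions are copied from the event; the mode names
 * follow the usual APIC delivery-mode encoding. */
static void decode_msi(uint64_t address, uint64_t data)
{
	static const char * const mode[8] = {
		"Fixed", "LowPrio", "SMI", "res3",
		"NMI", "INIT", "res6", "ExtINT",
	};

	printf("dst %u vec %x (%s|%s|%s%s)\n",
	       (unsigned int)((address >> 12) & 0xff),
	       (unsigned int)(data & 0xff),
	       mode[(data >> 8) & 0x7],
	       (address & (1 << 2)) ? "logical" : "physical",
	       (data & (1 << 15)) ? "level" : "edge",
	       (address & (1 << 3)) ? "|rh" : "");
}

int main(void)
{
	decode_msi(0xfee01000, 0x4031);	/* invented sample pair */
	return 0;
}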
diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
new file mode 100644
index 00000000..2821b86d
--- /dev/null
+++ b/include/trace/events/lock.h
@@ -0,0 +1,86 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lock
+
+#if !defined(_TRACE_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_LOCK_H
+
+#include <linux/lockdep.h>
+#include <linux/tracepoint.h>
+
+#ifdef CONFIG_LOCKDEP
+
+TRACE_EVENT(lock_acquire,
+
+ TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
+ int trylock, int read, int check,
+ struct lockdep_map *next_lock, unsigned long ip),
+
+ TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, flags)
+ __string(name, lock->name)
+ __field(void *, lockdep_addr)
+ ),
+
+ TP_fast_assign(
+ __entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
+ __assign_str(name, lock->name);
+ __entry->lockdep_addr = lock;
+ ),
+
+ TP_printk("%p %s%s%s", __entry->lockdep_addr,
+ (__entry->flags & 1) ? "try " : "",
+ (__entry->flags & 2) ? "read " : "",
+ __get_str(name))
+);
+
+DECLARE_EVENT_CLASS(lock,
+
+ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
+
+ TP_ARGS(lock, ip),
+
+ TP_STRUCT__entry(
+ __string( name, lock->name )
+ __field( void *, lockdep_addr )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, lock->name);
+ __entry->lockdep_addr = lock;
+ ),
+
+ TP_printk("%p %s", __entry->lockdep_addr, __get_str(name))
+);
+
+DEFINE_EVENT(lock, lock_release,
+
+ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
+
+ TP_ARGS(lock, ip)
+);
+
+#ifdef CONFIG_LOCK_STAT
+
+DEFINE_EVENT(lock, lock_contended,
+
+ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
+
+ TP_ARGS(lock, ip)
+);
+
+DEFINE_EVENT(lock, lock_acquired,
+
+ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
+
+ TP_ARGS(lock, ip)
+);
+
+#endif /* CONFIG_LOCK_STAT */
+#endif /* CONFIG_LOCKDEP */
+
+#endif /* _TRACE_LOCK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
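Note on lock_acquire above: it packs its trylock and read arguments into a two-bit flags field at assign time and unpacks them again in the printk. A self-contained sketch of that round trip (illustrative only; the lock names are made up):

#include <stdio.h>

/* Mirror lock_acquire's packing: bit 0 = trylock, bit 1 = read. */
static unsigned int pack_flags(int trylock, int read)
{
	return (trylock ? 1 : 0) | (read ? 2 : 0);
}

static void show(const char *name, unsigned int flags)
{
	printf("%s%s%s\n",
	       (flags & 1) ? "try " : "",
	       (flags & 2) ? "read " : "",
	       name);
}

int main(void)
{
	show("&sb->s_type->i_mutex_key", pack_flags(0, 0));	/* plain acquire */
	show("rcu_read_lock", pack_flags(0, 1));		/* read acquire */
	return 0;
}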
diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h
new file mode 100644
index 00000000..4cbbcef6
--- /dev/null
+++ b/include/trace/events/mce.h
@@ -0,0 +1,69 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mce
+
+#if !defined(_TRACE_MCE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MCE_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <asm/mce.h>
+
+TRACE_EVENT(mce_record,
+
+ TP_PROTO(struct mce *m),
+
+ TP_ARGS(m),
+
+ TP_STRUCT__entry(
+ __field( u64, mcgcap )
+ __field( u64, mcgstatus )
+ __field( u64, status )
+ __field( u64, addr )
+ __field( u64, misc )
+ __field( u64, ip )
+ __field( u64, tsc )
+ __field( u64, walltime )
+ __field( u32, cpu )
+ __field( u32, cpuid )
+ __field( u32, apicid )
+ __field( u32, socketid )
+ __field( u8, cs )
+ __field( u8, bank )
+ __field( u8, cpuvendor )
+ ),
+
+ TP_fast_assign(
+ __entry->mcgcap = m->mcgcap;
+ __entry->mcgstatus = m->mcgstatus;
+ __entry->status = m->status;
+ __entry->addr = m->addr;
+ __entry->misc = m->misc;
+ __entry->ip = m->ip;
+ __entry->tsc = m->tsc;
+ __entry->walltime = m->time;
+ __entry->cpu = m->extcpu;
+ __entry->cpuid = m->cpuid;
+ __entry->apicid = m->apicid;
+ __entry->socketid = m->socketid;
+ __entry->cs = m->cs;
+ __entry->bank = m->bank;
+ __entry->cpuvendor = m->cpuvendor;
+ ),
+
+ TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, ADDR/MISC: %016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x",
+ __entry->cpu,
+ __entry->mcgcap, __entry->mcgstatus,
+ __entry->bank, __entry->status,
+ __entry->addr, __entry->misc,
+ __entry->cs, __entry->ip,
+ __entry->tsc,
+ __entry->cpuvendor, __entry->cpuid,
+ __entry->walltime,
+ __entry->socketid,
+ __entry->apicid)
+);
+
+#endif /* _TRACE_MCE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
new file mode 100644
index 00000000..21a546d2
--- /dev/null
+++ b/include/trace/events/module.h
@@ -0,0 +1,131 @@
+/*
+ * Because linux/module.h has tracepoints in the header, and ftrace.h
+ * eventually includes this file, define_trace.h includes linux/module.h.
+ * But we do not want module.h to override the TRACE_SYSTEM macro
+ * variable that define_trace.h is processing, so we only set it
+ * when module events are being processed, which happens when
+ * CREATE_TRACE_POINTS is defined.
+ */
+#ifdef CREATE_TRACE_POINTS
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM module
+#endif
+
+#if !defined(_TRACE_MODULE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MODULE_H
+
+#include <linux/tracepoint.h>
+
+#ifdef CONFIG_MODULES
+
+struct module;
+
+#define show_module_flags(flags) __print_flags(flags, "", \
+ { (1UL << TAINT_PROPRIETARY_MODULE), "P" }, \
+ { (1UL << TAINT_FORCED_MODULE), "F" }, \
+ { (1UL << TAINT_CRAP), "C" })
+
+TRACE_EVENT(module_load,
+
+ TP_PROTO(struct module *mod),
+
+ TP_ARGS(mod),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, taints )
+ __string( name, mod->name )
+ ),
+
+ TP_fast_assign(
+ __entry->taints = mod->taints;
+ __assign_str(name, mod->name);
+ ),
+
+ TP_printk("%s %s", __get_str(name), show_module_flags(__entry->taints))
+);
+
+TRACE_EVENT(module_free,
+
+ TP_PROTO(struct module *mod),
+
+ TP_ARGS(mod),
+
+ TP_STRUCT__entry(
+ __string( name, mod->name )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, mod->name);
+ ),
+
+ TP_printk("%s", __get_str(name))
+);
+
+#ifdef CONFIG_MODULE_UNLOAD
+/* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */
+
+DECLARE_EVENT_CLASS(module_refcnt,
+
+ TP_PROTO(struct module *mod, unsigned long ip),
+
+ TP_ARGS(mod, ip),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, ip )
+ __field( int, refcnt )
+ __string( name, mod->name )
+ ),
+
+ TP_fast_assign(
+ __entry->ip = ip;
+ __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
+ __assign_str(name, mod->name);
+ ),
+
+ TP_printk("%s call_site=%pf refcnt=%d",
+ __get_str(name), (void *)__entry->ip, __entry->refcnt)
+);
+
+DEFINE_EVENT(module_refcnt, module_get,
+
+ TP_PROTO(struct module *mod, unsigned long ip),
+
+ TP_ARGS(mod, ip)
+);
+
+DEFINE_EVENT(module_refcnt, module_put,
+
+ TP_PROTO(struct module *mod, unsigned long ip),
+
+ TP_ARGS(mod, ip)
+);
+#endif /* CONFIG_MODULE_UNLOAD */
+
+TRACE_EVENT(module_request,
+
+ TP_PROTO(char *name, bool wait, unsigned long ip),
+
+ TP_ARGS(name, wait, ip),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, ip )
+ __field( bool, wait )
+ __string( name, name )
+ ),
+
+ TP_fast_assign(
+ __entry->ip = ip;
+ __entry->wait = wait;
+ __assign_str(name, name);
+ ),
+
+ TP_printk("%s wait=%d call_site=%pf",
+ __get_str(name), (int)__entry->wait, (void *)__entry->ip)
+);
+
+#endif /* CONFIG_MODULES */
+
+#endif /* _TRACE_MODULE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
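Note on show_module_flags above: it turns the taint bitmask into the familiar P/F/C letters. A hedged userspace equivalent follows; the TAINT_* bit positions (proprietary = 0, forced load = 1, staging/"crap" = 10) match the mainline definitions of this era but should be treated as assumptions:

#include <stdio.h>

/* Assumed taint bit positions (see the note above). */
#define TAINT_PROPRIETARY_MODULE	0
#define TAINT_FORCED_MODULE		1
#define TAINT_CRAP			10

static void show_module_flags(unsigned int taints)
{
	printf("%s%s%s\n",
	       (taints & (1u << TAINT_PROPRIETARY_MODULE)) ? "P" : "",
	       (taints & (1u << TAINT_FORCED_MODULE)) ? "F" : "",
	       (taints & (1u << TAINT_CRAP)) ? "C" : "");
}

int main(void)
{
	show_module_flags(1u << TAINT_PROPRIETARY_MODULE);	/* prints "P" */
	return 0;
}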
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
new file mode 100644
index 00000000..8fe1e93f
--- /dev/null
+++ b/include/trace/events/napi.h
@@ -0,0 +1,38 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM napi
+
+#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NAPI_H
+
+#include <linux/netdevice.h>
+#include <linux/tracepoint.h>
+#include <linux/ftrace.h>
+
+#define NO_DEV "(no_device)"
+
+TRACE_EVENT(napi_poll,
+
+ TP_PROTO(struct napi_struct *napi),
+
+ TP_ARGS(napi),
+
+ TP_STRUCT__entry(
+ __field( struct napi_struct *, napi)
+ __string( dev_name, napi->dev ? napi->dev->name : NO_DEV)
+ ),
+
+ TP_fast_assign(
+ __entry->napi = napi;
+ __assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV);
+ ),
+
+ TP_printk("napi poll on napi struct %p for device %s",
+ __entry->napi, __get_str(dev_name))
+);
+
+#undef NO_DEV
+
+#endif /* _TRACE_NAPI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
new file mode 100644
index 00000000..f99645d0
--- /dev/null
+++ b/include/trace/events/net.h
@@ -0,0 +1,84 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM net
+
+#if !defined(_TRACE_NET_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NET_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(net_dev_xmit,
+
+ TP_PROTO(struct sk_buff *skb,
+ int rc,
+ struct net_device *dev,
+ unsigned int skb_len),
+
+ TP_ARGS(skb, rc, dev, skb_len),
+
+ TP_STRUCT__entry(
+ __field( void *, skbaddr )
+ __field( unsigned int, len )
+ __field( int, rc )
+ __string( name, dev->name )
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+ __entry->len = skb_len;
+ __entry->rc = rc;
+ __assign_str(name, dev->name);
+ ),
+
+ TP_printk("dev=%s skbaddr=%p len=%u rc=%d",
+ __get_str(name), __entry->skbaddr, __entry->len, __entry->rc)
+);
+
+DECLARE_EVENT_CLASS(net_dev_template,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field( void *, skbaddr )
+ __field( unsigned int, len )
+ __string( name, skb->dev->name )
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+ __entry->len = skb->len;
+ __assign_str(name, skb->dev->name);
+ ),
+
+ TP_printk("dev=%s skbaddr=%p len=%u",
+ __get_str(name), __entry->skbaddr, __entry->len)
+);
+
+DEFINE_EVENT(net_dev_template, net_dev_queue,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_template, netif_receive_skb,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_template, netif_rx,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+#endif /* _TRACE_NET_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
new file mode 100644
index 00000000..1bcc2a8c
--- /dev/null
+++ b/include/trace/events/power.h
@@ -0,0 +1,240 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM power
+
+#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_POWER_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(cpu,
+
+ TP_PROTO(unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(state, cpu_id),
+
+ TP_STRUCT__entry(
+ __field( u32, state )
+ __field( u32, cpu_id )
+ ),
+
+ TP_fast_assign(
+ __entry->state = state;
+ __entry->cpu_id = cpu_id;
+ ),
+
+ TP_printk("state=%lu cpu_id=%lu", (unsigned long)__entry->state,
+ (unsigned long)__entry->cpu_id)
+);
+
+DEFINE_EVENT(cpu, cpu_idle,
+
+ TP_PROTO(unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(state, cpu_id)
+);
+
+/* This file can be included multiple times; see TRACE_HEADER_MULTI_READ at the top */
+#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING
+#define _PWR_EVENT_AVOID_DOUBLE_DEFINING
+
+#define PWR_EVENT_EXIT -1
+#endif
+
+DEFINE_EVENT(cpu, cpu_frequency,
+
+ TP_PROTO(unsigned int frequency, unsigned int cpu_id),
+
+ TP_ARGS(frequency, cpu_id)
+);
+
+TRACE_EVENT(machine_suspend,
+
+ TP_PROTO(unsigned int state),
+
+ TP_ARGS(state),
+
+ TP_STRUCT__entry(
+ __field( u32, state )
+ ),
+
+ TP_fast_assign(
+ __entry->state = state;
+ ),
+
+ TP_printk("state=%lu", (unsigned long)__entry->state)
+);
+
+/* This code will be removed once the deprecation period has passed (2.6.41) */
+#ifdef CONFIG_EVENT_POWER_TRACING_DEPRECATED
+
+/*
+ * The power events are used for cpuidle & suspend (power_start, power_end)
+ * and for cpufreq (power_frequency)
+ */
+DECLARE_EVENT_CLASS(power,
+
+ TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(type, state, cpu_id),
+
+ TP_STRUCT__entry(
+ __field( u64, type )
+ __field( u64, state )
+ __field( u64, cpu_id )
+ ),
+
+ TP_fast_assign(
+ __entry->type = type;
+ __entry->state = state;
+ __entry->cpu_id = cpu_id;
+ ),
+
+ TP_printk("type=%lu state=%lu cpu_id=%lu", (unsigned long)__entry->type,
+ (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
+);
+
+DEFINE_EVENT(power, power_start,
+
+ TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(type, state, cpu_id)
+);
+
+DEFINE_EVENT(power, power_frequency,
+
+ TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(type, state, cpu_id)
+);
+
+TRACE_EVENT(power_end,
+
+ TP_PROTO(unsigned int cpu_id),
+
+ TP_ARGS(cpu_id),
+
+ TP_STRUCT__entry(
+ __field( u64, cpu_id )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = cpu_id;
+ ),
+
+ TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id)
+
+);
+
+/* Deprecated dummy functions must be protected against multiple declaration */
+#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
+#define _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
+
+enum {
+ POWER_NONE = 0,
+ POWER_CSTATE = 1,
+ POWER_PSTATE = 2,
+};
+#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
+
+#else /* CONFIG_EVENT_POWER_TRACING_DEPRECATED */
+
+#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
+#define _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
+enum {
+ POWER_NONE = 0,
+ POWER_CSTATE = 1,
+ POWER_PSTATE = 2,
+};
+
+/* These dummy declarations have to be ripped out when the deprecated
+ events get removed */
+static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {};
+static inline void trace_power_end(u64 cpuid) {};
+static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {};
+#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
+
+#endif /* CONFIG_EVENT_POWER_TRACING_DEPRECATED */
+
+/*
+ * The clock events are used for clock enable/disable and for
+ * clock rate change
+ */
+DECLARE_EVENT_CLASS(clock,
+
+ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(name, state, cpu_id),
+
+ TP_STRUCT__entry(
+ __string( name, name )
+ __field( u64, state )
+ __field( u64, cpu_id )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->state = state;
+ __entry->cpu_id = cpu_id;
+ ),
+
+ TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
+ (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
+);
+
+DEFINE_EVENT(clock, clock_enable,
+
+ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(name, state, cpu_id)
+);
+
+DEFINE_EVENT(clock, clock_disable,
+
+ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(name, state, cpu_id)
+);
+
+DEFINE_EVENT(clock, clock_set_rate,
+
+ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(name, state, cpu_id)
+);
+
+/*
+ * The power domain events are used for power domain transitions
+ */
+DECLARE_EVENT_CLASS(power_domain,
+
+ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(name, state, cpu_id),
+
+ TP_STRUCT__entry(
+ __string( name, name )
+ __field( u64, state )
+ __field( u64, cpu_id )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->state = state;
+ __entry->cpu_id = cpu_id;
+	),
+
+ TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
+ (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
+);
+
+DEFINE_EVENT(power_domain, power_domain_target,
+
+ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
+
+ TP_ARGS(name, state, cpu_id)
+);
+#endif /* _TRACE_POWER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
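Note on cpu_idle above: it serves both ends of an idle period — entry carries the target idle-state index, and exit reuses the same event with state = PWR_EVENT_EXIT (-1, i.e. 0xffffffff once stored in the u32 field). A small sketch of how a trace consumer might tell the two apart (the handler name and sample values are invented):

#include <stdio.h>
#include <stdint.h>

#define PWR_EVENT_EXIT -1

/* A cpu_idle record stores state as a u32; (u32)-1 marks leaving idle. */
static void handle_cpu_idle(uint32_t state, uint32_t cpu_id)
{
	if (state == (uint32_t)PWR_EVENT_EXIT)
		printf("cpu%u: left idle\n", cpu_id);
	else
		printf("cpu%u: entered idle state %u\n", cpu_id, state);
}

int main(void)
{
	handle_cpu_idle(2, 0);				/* enter state 2 */
	handle_cpu_idle((uint32_t)PWR_EVENT_EXIT, 0);	/* exit idle */
	return 0;
}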
diff --git a/include/trace/events/regulator.h b/include/trace/events/regulator.h
new file mode 100644
index 00000000..37502a74
--- /dev/null
+++ b/include/trace/events/regulator.h
@@ -0,0 +1,141 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM regulator
+
+#if !defined(_TRACE_REGULATOR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_REGULATOR_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+/*
+ * Events which just log themselves and the regulator name for enable/disable
+ * type tracking.
+ */
+DECLARE_EVENT_CLASS(regulator_basic,
+
+ TP_PROTO(const char *name),
+
+ TP_ARGS(name),
+
+ TP_STRUCT__entry(
+ __string( name, name )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ ),
+
+ TP_printk("name=%s", __get_str(name))
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_enable,
+
+ TP_PROTO(const char *name),
+
+ TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_enable_delay,
+
+ TP_PROTO(const char *name),
+
+ TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_enable_complete,
+
+ TP_PROTO(const char *name),
+
+ TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_disable,
+
+ TP_PROTO(const char *name),
+
+ TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_disable_complete,
+
+ TP_PROTO(const char *name),
+
+ TP_ARGS(name)
+
+);
+
+/*
+ * Events that take a range of numerical values, mostly for voltages
+ * and so on.
+ */
+DECLARE_EVENT_CLASS(regulator_range,
+
+ TP_PROTO(const char *name, int min, int max),
+
+ TP_ARGS(name, min, max),
+
+ TP_STRUCT__entry(
+ __string( name, name )
+ __field( int, min )
+ __field( int, max )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->min = min;
+ __entry->max = max;
+ ),
+
+ TP_printk("name=%s (%d-%d)", __get_str(name),
+ (int)__entry->min, (int)__entry->max)
+);
+
+DEFINE_EVENT(regulator_range, regulator_set_voltage,
+
+ TP_PROTO(const char *name, int min, int max),
+
+ TP_ARGS(name, min, max)
+
+);
+
+
+/*
+ * Events that take a single value, mostly for readback and refcounts.
+ */
+DECLARE_EVENT_CLASS(regulator_value,
+
+ TP_PROTO(const char *name, unsigned int val),
+
+ TP_ARGS(name, val),
+
+ TP_STRUCT__entry(
+ __string( name, name )
+ __field( unsigned int, val )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->val = val;
+ ),
+
+ TP_printk("name=%s, val=%u", __get_str(name),
+ (int)__entry->val)
+);
+
+DEFINE_EVENT(regulator_value, regulator_set_voltage_complete,
+
+ TP_PROTO(const char *name, unsigned int value),
+
+ TP_ARGS(name, value)
+
+);
+
+#endif /* _TRACE_REGULATOR_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
new file mode 100644
index 00000000..f6334782
--- /dev/null
+++ b/include/trace/events/sched.h
@@ -0,0 +1,397 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sched
+
+#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCHED_H
+
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
+
+/*
+ * Tracepoint for calling kthread_stop, performed to end a kthread:
+ */
+TRACE_EVENT(sched_kthread_stop,
+
+ TP_PROTO(struct task_struct *t),
+
+ TP_ARGS(t),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
+ __entry->pid = t->pid;
+ ),
+
+ TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
+);
+
+/*
+ * Tracepoint for the return value of the kthread stopping:
+ */
+TRACE_EVENT(sched_kthread_stop_ret,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret),
+
+ TP_STRUCT__entry(
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->ret = ret;
+ ),
+
+ TP_printk("ret=%d", __entry->ret)
+);
+
+/*
+ * Tracepoint for waking up a task:
+ */
+DECLARE_EVENT_CLASS(sched_wakeup_template,
+
+ TP_PROTO(struct task_struct *p, int success),
+
+ TP_ARGS(p, success),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ __field( int, success )
+ __field( int, target_cpu )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->prio = p->prio;
+ __entry->success = success;
+ __entry->target_cpu = task_cpu(p);
+ ),
+
+ TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
+ __entry->comm, __entry->pid, __entry->prio,
+ __entry->success, __entry->target_cpu)
+);
+
+DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
+ TP_PROTO(struct task_struct *p, int success),
+ TP_ARGS(p, success));
+
+/*
+ * Tracepoint for waking up a new task:
+ */
+DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
+ TP_PROTO(struct task_struct *p, int success),
+ TP_ARGS(p, success));
+
+#ifdef CREATE_TRACE_POINTS
+static inline long __trace_sched_switch_state(struct task_struct *p)
+{
+ long state = p->state;
+
+#ifdef CONFIG_PREEMPT
+ /*
+ * For all intents and purposes a preempted task is a running task.
+ */
+ if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
+ state = TASK_RUNNING;
+#endif
+
+ return state;
+}
+#endif
+
+/*
+ * Tracepoint for task switches, performed by the scheduler:
+ */
+TRACE_EVENT(sched_switch,
+
+ TP_PROTO(struct task_struct *prev,
+ struct task_struct *next),
+
+ TP_ARGS(prev, next),
+
+ TP_STRUCT__entry(
+ __array( char, prev_comm, TASK_COMM_LEN )
+ __field( pid_t, prev_pid )
+ __field( int, prev_prio )
+ __field( long, prev_state )
+ __array( char, next_comm, TASK_COMM_LEN )
+ __field( pid_t, next_pid )
+ __field( int, next_prio )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
+ __entry->prev_pid = prev->pid;
+ __entry->prev_prio = prev->prio;
+ __entry->prev_state = __trace_sched_switch_state(prev);
+ memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
+ __entry->next_pid = next->pid;
+ __entry->next_prio = next->prio;
+ ),
+
+ TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
+ __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
+ __entry->prev_state ?
+ __print_flags(__entry->prev_state, "|",
+ { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
+ { 16, "Z" }, { 32, "X" }, { 64, "x" },
+ { 128, "W" }) : "R",
+ __entry->next_comm, __entry->next_pid, __entry->next_prio)
+);
+
+/*
+ * Tracepoint for a task being migrated:
+ */
+TRACE_EVENT(sched_migrate_task,
+
+ TP_PROTO(struct task_struct *p, int dest_cpu),
+
+ TP_ARGS(p, dest_cpu),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ __field( int, orig_cpu )
+ __field( int, dest_cpu )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->prio = p->prio;
+ __entry->orig_cpu = task_cpu(p);
+ __entry->dest_cpu = dest_cpu;
+ ),
+
+ TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
+ __entry->comm, __entry->pid, __entry->prio,
+ __entry->orig_cpu, __entry->dest_cpu)
+);
+
+DECLARE_EVENT_CLASS(sched_process_template,
+
+ TP_PROTO(struct task_struct *p),
+
+ TP_ARGS(p),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->prio = p->prio;
+ ),
+
+ TP_printk("comm=%s pid=%d prio=%d",
+ __entry->comm, __entry->pid, __entry->prio)
+);
+
+/*
+ * Tracepoint for freeing a task:
+ */
+DEFINE_EVENT(sched_process_template, sched_process_free,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p));
+
+
+/*
+ * Tracepoint for a task exiting:
+ */
+DEFINE_EVENT(sched_process_template, sched_process_exit,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p));
+
+/*
+ * Tracepoint for waiting on task to unschedule:
+ */
+DEFINE_EVENT(sched_process_template, sched_wait_task,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p));
+
+/*
+ * Tracepoint for a waiting task:
+ */
+TRACE_EVENT(sched_process_wait,
+
+ TP_PROTO(struct pid *pid),
+
+ TP_ARGS(pid),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ __entry->pid = pid_nr(pid);
+ __entry->prio = current->prio;
+ ),
+
+ TP_printk("comm=%s pid=%d prio=%d",
+ __entry->comm, __entry->pid, __entry->prio)
+);
+
+/*
+ * Tracepoint for do_fork:
+ */
+TRACE_EVENT(sched_process_fork,
+
+ TP_PROTO(struct task_struct *parent, struct task_struct *child),
+
+ TP_ARGS(parent, child),
+
+ TP_STRUCT__entry(
+ __array( char, parent_comm, TASK_COMM_LEN )
+ __field( pid_t, parent_pid )
+ __array( char, child_comm, TASK_COMM_LEN )
+ __field( pid_t, child_pid )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
+ __entry->parent_pid = parent->pid;
+ memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
+ __entry->child_pid = child->pid;
+ ),
+
+ TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
+ __entry->parent_comm, __entry->parent_pid,
+ __entry->child_comm, __entry->child_pid)
+);
+
+/*
+ * XXX the sched_stat tracepoints below only apply to SCHED_OTHER/BATCH/IDLE;
+ * adding sched_stat support to SCHED_FIFO/RR would be welcome.
+ */
+DECLARE_EVENT_CLASS(sched_stat_template,
+
+ TP_PROTO(struct task_struct *tsk, u64 delay),
+
+ TP_ARGS(tsk, delay),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( u64, delay )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->delay = delay;
+ )
+ TP_perf_assign(
+ __perf_count(delay);
+ ),
+
+ TP_printk("comm=%s pid=%d delay=%Lu [ns]",
+ __entry->comm, __entry->pid,
+ (unsigned long long)__entry->delay)
+);
+
+
+/*
+ * Tracepoint for accounting wait time (time the task is runnable
+ * but not actually running due to scheduler contention).
+ */
+DEFINE_EVENT(sched_stat_template, sched_stat_wait,
+ TP_PROTO(struct task_struct *tsk, u64 delay),
+ TP_ARGS(tsk, delay));
+
+/*
+ * Tracepoint for accounting sleep time (time the task is not runnable,
+ * including iowait, see below).
+ */
+DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
+ TP_PROTO(struct task_struct *tsk, u64 delay),
+ TP_ARGS(tsk, delay));
+
+/*
+ * Tracepoint for accounting iowait time (time the task is not runnable
+ * due to waiting on IO to complete).
+ */
+DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
+ TP_PROTO(struct task_struct *tsk, u64 delay),
+ TP_ARGS(tsk, delay));
+
+/*
+ * Tracepoint for accounting runtime (time the task is executing
+ * on a CPU).
+ */
+TRACE_EVENT(sched_stat_runtime,
+
+ TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+
+ TP_ARGS(tsk, runtime, vruntime),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( u64, runtime )
+ __field( u64, vruntime )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->runtime = runtime;
+ __entry->vruntime = vruntime;
+ )
+ TP_perf_assign(
+ __perf_count(runtime);
+ ),
+
+ TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
+ __entry->comm, __entry->pid,
+ (unsigned long long)__entry->runtime,
+ (unsigned long long)__entry->vruntime)
+);
+
+/*
+ * Tracepoint for showing priority inheritance modifying a task's
+ * priority.
+ */
+TRACE_EVENT(sched_pi_setprio,
+
+ TP_PROTO(struct task_struct *tsk, int newprio),
+
+ TP_ARGS(tsk, newprio),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, oldprio )
+ __field( int, newprio )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->oldprio = tsk->prio;
+ __entry->newprio = newprio;
+ ),
+
+ TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
+ __entry->comm, __entry->pid,
+ __entry->oldprio, __entry->newprio)
+);
+
+#endif /* _TRACE_SCHED_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
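Note on sched_switch above: it prints prev_state as single-letter codes, with a zero state word shown as R (running). A standalone decode sketch using the same bit-to-letter table as the TP_printk (illustrative only):

#include <stdio.h>

static void print_task_state(long state)
{
	/* Same table as sched_switch's __print_flags call. */
	static const struct { long bit; const char *name; } tbl[] = {
		{ 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
		{ 16, "Z" }, { 32, "X" }, { 64, "x" }, { 128, "W" },
	};
	size_t i;
	int first = 1;

	if (!state) {			/* TASK_RUNNING */
		printf("R\n");
		return;
	}
	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		if (state & tbl[i].bit) {
			printf("%s%s", first ? "" : "|", tbl[i].name);
			first = 0;
		}
	}
	printf("\n");
}

int main(void)
{
	print_task_state(0);	/* R */
	print_task_state(1);	/* S (interruptible sleep) */
	print_task_state(2);	/* D (uninterruptible sleep) */
	return 0;
}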
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
new file mode 100644
index 00000000..db6c9351
--- /dev/null
+++ b/include/trace/events/scsi.h
@@ -0,0 +1,365 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM scsi
+
+#if !defined(_TRACE_SCSI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCSI_H
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#define scsi_opcode_name(opcode) { opcode, #opcode }
+#define show_opcode_name(val) \
+ __print_symbolic(val, \
+ scsi_opcode_name(TEST_UNIT_READY), \
+ scsi_opcode_name(REZERO_UNIT), \
+ scsi_opcode_name(REQUEST_SENSE), \
+ scsi_opcode_name(FORMAT_UNIT), \
+ scsi_opcode_name(READ_BLOCK_LIMITS), \
+ scsi_opcode_name(REASSIGN_BLOCKS), \
+ scsi_opcode_name(INITIALIZE_ELEMENT_STATUS), \
+ scsi_opcode_name(READ_6), \
+ scsi_opcode_name(WRITE_6), \
+ scsi_opcode_name(SEEK_6), \
+ scsi_opcode_name(READ_REVERSE), \
+ scsi_opcode_name(WRITE_FILEMARKS), \
+ scsi_opcode_name(SPACE), \
+ scsi_opcode_name(INQUIRY), \
+ scsi_opcode_name(RECOVER_BUFFERED_DATA), \
+ scsi_opcode_name(MODE_SELECT), \
+ scsi_opcode_name(RESERVE), \
+ scsi_opcode_name(RELEASE), \
+ scsi_opcode_name(COPY), \
+ scsi_opcode_name(ERASE), \
+ scsi_opcode_name(MODE_SENSE), \
+ scsi_opcode_name(START_STOP), \
+ scsi_opcode_name(RECEIVE_DIAGNOSTIC), \
+ scsi_opcode_name(SEND_DIAGNOSTIC), \
+ scsi_opcode_name(ALLOW_MEDIUM_REMOVAL), \
+ scsi_opcode_name(SET_WINDOW), \
+ scsi_opcode_name(READ_CAPACITY), \
+ scsi_opcode_name(READ_10), \
+ scsi_opcode_name(WRITE_10), \
+ scsi_opcode_name(SEEK_10), \
+ scsi_opcode_name(POSITION_TO_ELEMENT), \
+ scsi_opcode_name(WRITE_VERIFY), \
+ scsi_opcode_name(VERIFY), \
+ scsi_opcode_name(SEARCH_HIGH), \
+ scsi_opcode_name(SEARCH_EQUAL), \
+ scsi_opcode_name(SEARCH_LOW), \
+ scsi_opcode_name(SET_LIMITS), \
+ scsi_opcode_name(PRE_FETCH), \
+ scsi_opcode_name(READ_POSITION), \
+ scsi_opcode_name(SYNCHRONIZE_CACHE), \
+ scsi_opcode_name(LOCK_UNLOCK_CACHE), \
+ scsi_opcode_name(READ_DEFECT_DATA), \
+ scsi_opcode_name(MEDIUM_SCAN), \
+ scsi_opcode_name(COMPARE), \
+ scsi_opcode_name(COPY_VERIFY), \
+ scsi_opcode_name(WRITE_BUFFER), \
+ scsi_opcode_name(READ_BUFFER), \
+ scsi_opcode_name(UPDATE_BLOCK), \
+ scsi_opcode_name(READ_LONG), \
+ scsi_opcode_name(WRITE_LONG), \
+ scsi_opcode_name(CHANGE_DEFINITION), \
+ scsi_opcode_name(WRITE_SAME), \
+ scsi_opcode_name(UNMAP), \
+ scsi_opcode_name(READ_TOC), \
+ scsi_opcode_name(LOG_SELECT), \
+ scsi_opcode_name(LOG_SENSE), \
+ scsi_opcode_name(XDWRITEREAD_10), \
+ scsi_opcode_name(MODE_SELECT_10), \
+ scsi_opcode_name(RESERVE_10), \
+ scsi_opcode_name(RELEASE_10), \
+ scsi_opcode_name(MODE_SENSE_10), \
+ scsi_opcode_name(PERSISTENT_RESERVE_IN), \
+ scsi_opcode_name(PERSISTENT_RESERVE_OUT), \
+ scsi_opcode_name(VARIABLE_LENGTH_CMD), \
+ scsi_opcode_name(REPORT_LUNS), \
+ scsi_opcode_name(MAINTENANCE_IN), \
+ scsi_opcode_name(MAINTENANCE_OUT), \
+ scsi_opcode_name(MOVE_MEDIUM), \
+ scsi_opcode_name(EXCHANGE_MEDIUM), \
+ scsi_opcode_name(READ_12), \
+ scsi_opcode_name(WRITE_12), \
+ scsi_opcode_name(WRITE_VERIFY_12), \
+ scsi_opcode_name(SEARCH_HIGH_12), \
+ scsi_opcode_name(SEARCH_EQUAL_12), \
+ scsi_opcode_name(SEARCH_LOW_12), \
+ scsi_opcode_name(READ_ELEMENT_STATUS), \
+ scsi_opcode_name(SEND_VOLUME_TAG), \
+ scsi_opcode_name(WRITE_LONG_2), \
+ scsi_opcode_name(READ_16), \
+ scsi_opcode_name(WRITE_16), \
+ scsi_opcode_name(VERIFY_16), \
+ scsi_opcode_name(WRITE_SAME_16), \
+ scsi_opcode_name(SERVICE_ACTION_IN), \
+ scsi_opcode_name(SAI_READ_CAPACITY_16), \
+ scsi_opcode_name(SAI_GET_LBA_STATUS), \
+ scsi_opcode_name(MI_REPORT_TARGET_PGS), \
+ scsi_opcode_name(MO_SET_TARGET_PGS), \
+ scsi_opcode_name(READ_32), \
+ scsi_opcode_name(WRITE_32), \
+ scsi_opcode_name(WRITE_SAME_32), \
+ scsi_opcode_name(ATA_16), \
+ scsi_opcode_name(ATA_12))
+
+#define scsi_hostbyte_name(result) { result, #result }
+#define show_hostbyte_name(val) \
+ __print_symbolic(val, \
+ scsi_hostbyte_name(DID_OK), \
+ scsi_hostbyte_name(DID_NO_CONNECT), \
+ scsi_hostbyte_name(DID_BUS_BUSY), \
+ scsi_hostbyte_name(DID_TIME_OUT), \
+ scsi_hostbyte_name(DID_BAD_TARGET), \
+ scsi_hostbyte_name(DID_ABORT), \
+ scsi_hostbyte_name(DID_PARITY), \
+ scsi_hostbyte_name(DID_ERROR), \
+ scsi_hostbyte_name(DID_RESET), \
+ scsi_hostbyte_name(DID_BAD_INTR), \
+ scsi_hostbyte_name(DID_PASSTHROUGH), \
+ scsi_hostbyte_name(DID_SOFT_ERROR), \
+ scsi_hostbyte_name(DID_IMM_RETRY), \
+ scsi_hostbyte_name(DID_REQUEUE), \
+ scsi_hostbyte_name(DID_TRANSPORT_DISRUPTED), \
+ scsi_hostbyte_name(DID_TRANSPORT_FAILFAST))
+
+#define scsi_driverbyte_name(result) { result, #result }
+#define show_driverbyte_name(val) \
+ __print_symbolic(val, \
+ scsi_driverbyte_name(DRIVER_OK), \
+ scsi_driverbyte_name(DRIVER_BUSY), \
+ scsi_driverbyte_name(DRIVER_SOFT), \
+ scsi_driverbyte_name(DRIVER_MEDIA), \
+ scsi_driverbyte_name(DRIVER_ERROR), \
+ scsi_driverbyte_name(DRIVER_INVALID), \
+ scsi_driverbyte_name(DRIVER_TIMEOUT), \
+ scsi_driverbyte_name(DRIVER_HARD), \
+ scsi_driverbyte_name(DRIVER_SENSE))
+
+#define scsi_msgbyte_name(result) { result, #result }
+#define show_msgbyte_name(val) \
+ __print_symbolic(val, \
+ scsi_msgbyte_name(COMMAND_COMPLETE), \
+ scsi_msgbyte_name(EXTENDED_MESSAGE), \
+ scsi_msgbyte_name(SAVE_POINTERS), \
+ scsi_msgbyte_name(RESTORE_POINTERS), \
+ scsi_msgbyte_name(DISCONNECT), \
+ scsi_msgbyte_name(INITIATOR_ERROR), \
+ scsi_msgbyte_name(ABORT_TASK_SET), \
+ scsi_msgbyte_name(MESSAGE_REJECT), \
+ scsi_msgbyte_name(NOP), \
+ scsi_msgbyte_name(MSG_PARITY_ERROR), \
+ scsi_msgbyte_name(LINKED_CMD_COMPLETE), \
+ scsi_msgbyte_name(LINKED_FLG_CMD_COMPLETE), \
+ scsi_msgbyte_name(TARGET_RESET), \
+ scsi_msgbyte_name(ABORT_TASK), \
+ scsi_msgbyte_name(CLEAR_TASK_SET), \
+ scsi_msgbyte_name(INITIATE_RECOVERY), \
+ scsi_msgbyte_name(RELEASE_RECOVERY), \
+ scsi_msgbyte_name(CLEAR_ACA), \
+ scsi_msgbyte_name(LOGICAL_UNIT_RESET), \
+ scsi_msgbyte_name(SIMPLE_QUEUE_TAG), \
+ scsi_msgbyte_name(HEAD_OF_QUEUE_TAG), \
+ scsi_msgbyte_name(ORDERED_QUEUE_TAG), \
+ scsi_msgbyte_name(IGNORE_WIDE_RESIDUE), \
+ scsi_msgbyte_name(ACA), \
+ scsi_msgbyte_name(QAS_REQUEST), \
+ scsi_msgbyte_name(BUS_DEVICE_RESET), \
+ scsi_msgbyte_name(ABORT))
+
+#define scsi_statusbyte_name(result) { result, #result }
+#define show_statusbyte_name(val) \
+ __print_symbolic(val, \
+ scsi_statusbyte_name(SAM_STAT_GOOD), \
+ scsi_statusbyte_name(SAM_STAT_CHECK_CONDITION), \
+ scsi_statusbyte_name(SAM_STAT_CONDITION_MET), \
+ scsi_statusbyte_name(SAM_STAT_BUSY), \
+ scsi_statusbyte_name(SAM_STAT_INTERMEDIATE), \
+ scsi_statusbyte_name(SAM_STAT_INTERMEDIATE_CONDITION_MET), \
+ scsi_statusbyte_name(SAM_STAT_RESERVATION_CONFLICT), \
+ scsi_statusbyte_name(SAM_STAT_COMMAND_TERMINATED), \
+ scsi_statusbyte_name(SAM_STAT_TASK_SET_FULL), \
+ scsi_statusbyte_name(SAM_STAT_ACA_ACTIVE), \
+ scsi_statusbyte_name(SAM_STAT_TASK_ABORTED))
+
+#define scsi_prot_op_name(result) { result, #result }
+#define show_prot_op_name(val) \
+ __print_symbolic(val, \
+ scsi_prot_op_name(SCSI_PROT_NORMAL), \
+ scsi_prot_op_name(SCSI_PROT_READ_INSERT), \
+ scsi_prot_op_name(SCSI_PROT_WRITE_STRIP), \
+ scsi_prot_op_name(SCSI_PROT_READ_STRIP), \
+ scsi_prot_op_name(SCSI_PROT_WRITE_INSERT), \
+ scsi_prot_op_name(SCSI_PROT_READ_PASS), \
+ scsi_prot_op_name(SCSI_PROT_WRITE_PASS))
+
+const char *scsi_trace_parse_cdb(struct trace_seq*, unsigned char*, int);
+#define __parse_cdb(cdb, len) scsi_trace_parse_cdb(p, cdb, len)
+
+TRACE_EVENT(scsi_dispatch_cmd_start,
+
+ TP_PROTO(struct scsi_cmnd *cmd),
+
+ TP_ARGS(cmd),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, host_no )
+ __field( unsigned int, channel )
+ __field( unsigned int, id )
+ __field( unsigned int, lun )
+ __field( unsigned int, opcode )
+ __field( unsigned int, cmd_len )
+ __field( unsigned int, data_sglen )
+ __field( unsigned int, prot_sglen )
+ __field( unsigned char, prot_op )
+ __dynamic_array(unsigned char, cmnd, cmd->cmd_len)
+ ),
+
+ TP_fast_assign(
+ __entry->host_no = cmd->device->host->host_no;
+ __entry->channel = cmd->device->channel;
+ __entry->id = cmd->device->id;
+ __entry->lun = cmd->device->lun;
+ __entry->opcode = cmd->cmnd[0];
+ __entry->cmd_len = cmd->cmd_len;
+ __entry->data_sglen = scsi_sg_count(cmd);
+ __entry->prot_sglen = scsi_prot_sg_count(cmd);
+ __entry->prot_op = scsi_get_prot_op(cmd);
+ memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
+ ),
+
+ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
+ " prot_op=%s cmnd=(%s %s raw=%s)",
+ __entry->host_no, __entry->channel, __entry->id,
+ __entry->lun, __entry->data_sglen, __entry->prot_sglen,
+ show_prot_op_name(__entry->prot_op),
+ show_opcode_name(__entry->opcode),
+ __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
+ __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len))
+);
+
+TRACE_EVENT(scsi_dispatch_cmd_error,
+
+ TP_PROTO(struct scsi_cmnd *cmd, int rtn),
+
+ TP_ARGS(cmd, rtn),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, host_no )
+ __field( unsigned int, channel )
+ __field( unsigned int, id )
+ __field( unsigned int, lun )
+ __field( int, rtn )
+ __field( unsigned int, opcode )
+ __field( unsigned int, cmd_len )
+ __field( unsigned int, data_sglen )
+ __field( unsigned int, prot_sglen )
+ __field( unsigned char, prot_op )
+ __dynamic_array(unsigned char, cmnd, cmd->cmd_len)
+ ),
+
+ TP_fast_assign(
+ __entry->host_no = cmd->device->host->host_no;
+ __entry->channel = cmd->device->channel;
+ __entry->id = cmd->device->id;
+ __entry->lun = cmd->device->lun;
+ __entry->rtn = rtn;
+ __entry->opcode = cmd->cmnd[0];
+ __entry->cmd_len = cmd->cmd_len;
+ __entry->data_sglen = scsi_sg_count(cmd);
+ __entry->prot_sglen = scsi_prot_sg_count(cmd);
+ __entry->prot_op = scsi_get_prot_op(cmd);
+ memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
+ ),
+
+ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
+ " prot_op=%s cmnd=(%s %s raw=%s) rtn=%d",
+ __entry->host_no, __entry->channel, __entry->id,
+ __entry->lun, __entry->data_sglen, __entry->prot_sglen,
+ show_prot_op_name(__entry->prot_op),
+ show_opcode_name(__entry->opcode),
+ __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
+ __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
+ __entry->rtn)
+);
+
+DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
+
+ TP_PROTO(struct scsi_cmnd *cmd),
+
+ TP_ARGS(cmd),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, host_no )
+ __field( unsigned int, channel )
+ __field( unsigned int, id )
+ __field( unsigned int, lun )
+ __field( int, result )
+ __field( unsigned int, opcode )
+ __field( unsigned int, cmd_len )
+ __field( unsigned int, data_sglen )
+ __field( unsigned int, prot_sglen )
+ __field( unsigned char, prot_op )
+ __dynamic_array(unsigned char, cmnd, cmd->cmd_len)
+ ),
+
+ TP_fast_assign(
+ __entry->host_no = cmd->device->host->host_no;
+ __entry->channel = cmd->device->channel;
+ __entry->id = cmd->device->id;
+ __entry->lun = cmd->device->lun;
+ __entry->result = cmd->result;
+ __entry->opcode = cmd->cmnd[0];
+ __entry->cmd_len = cmd->cmd_len;
+ __entry->data_sglen = scsi_sg_count(cmd);
+ __entry->prot_sglen = scsi_prot_sg_count(cmd);
+ __entry->prot_op = scsi_get_prot_op(cmd);
+ memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
+ ),
+
+ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u " \
+ "prot_sgl=%u prot_op=%s cmnd=(%s %s raw=%s) result=(driver=" \
+ "%s host=%s message=%s status=%s)",
+ __entry->host_no, __entry->channel, __entry->id,
+ __entry->lun, __entry->data_sglen, __entry->prot_sglen,
+ show_prot_op_name(__entry->prot_op),
+ show_opcode_name(__entry->opcode),
+ __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
+ __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
+ show_driverbyte_name(((__entry->result) >> 24) & 0xff),
+ show_hostbyte_name(((__entry->result) >> 16) & 0xff),
+ show_msgbyte_name(((__entry->result) >> 8) & 0xff),
+ show_statusbyte_name(__entry->result & 0xff))
+);
+
+DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_done,
+ TP_PROTO(struct scsi_cmnd *cmd),
+ TP_ARGS(cmd));
+
+DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_timeout,
+ TP_PROTO(struct scsi_cmnd *cmd),
+ TP_ARGS(cmd));
+
+TRACE_EVENT(scsi_eh_wakeup,
+
+ TP_PROTO(struct Scsi_Host *shost),
+
+ TP_ARGS(shost),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, host_no )
+ ),
+
+ TP_fast_assign(
+ __entry->host_no = shost->host_no;
+ ),
+
+ TP_printk("host_no=%u", __entry->host_no)
+);
+
+#endif /* _TRACE_SCSI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
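Note on the done/timeout template above: it splits cmd->result into its four packed bytes — driver (bits 31:24), host (23:16), message (15:8) and SCSI status (7:0). A minimal sketch of that split; the sample value assumes the conventional DRIVER_SENSE (0x08) and SAM_STAT_CHECK_CONDITION (0x02) codes:

#include <stdio.h>
#include <stdint.h>

/* cmd->result packs four bytes: driver, host, message, status. */
static void decode_scsi_result(uint32_t result)
{
	printf("driver=%#x host=%#x message=%#x status=%#x\n",
	       (unsigned int)((result >> 24) & 0xff),
	       (unsigned int)((result >> 16) & 0xff),
	       (unsigned int)((result >> 8) & 0xff),
	       (unsigned int)(result & 0xff));
}

int main(void)
{
	decode_scsi_result(0x08000002);	/* DRIVER_SENSE + CHECK_CONDITION */
	return 0;
}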
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
new file mode 100644
index 00000000..17df4346
--- /dev/null
+++ b/include/trace/events/signal.h
@@ -0,0 +1,166 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM signal
+
+#if !defined(_TRACE_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SIGNAL_H
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
+
+#define TP_STORE_SIGINFO(__entry, info) \
+ do { \
+ if (info == SEND_SIG_NOINFO || \
+ info == SEND_SIG_FORCED) { \
+ __entry->errno = 0; \
+ __entry->code = SI_USER; \
+ } else if (info == SEND_SIG_PRIV) { \
+ __entry->errno = 0; \
+ __entry->code = SI_KERNEL; \
+ } else { \
+ __entry->errno = info->si_errno; \
+ __entry->code = info->si_code; \
+ } \
+ } while (0)
+
+/**
+ * signal_generate - called when a signal is generated
+ * @sig: signal number
+ * @info: pointer to struct siginfo
+ * @task: pointer to struct task_struct
+ *
+ * The current process sends a 'sig' signal to the 'task' process with
+ * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV,
+ * 'info' is not a real pointer and its fields must not be accessed.
+ * Instead, SEND_SIG_NOINFO means that si_code is SI_USER, and
+ * SEND_SIG_PRIV means that si_code is SI_KERNEL.
+ */
+TRACE_EVENT(signal_generate,
+
+ TP_PROTO(int sig, struct siginfo *info, struct task_struct *task),
+
+ TP_ARGS(sig, info, task),
+
+ TP_STRUCT__entry(
+ __field( int, sig )
+ __field( int, errno )
+ __field( int, code )
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ ),
+
+ TP_fast_assign(
+ __entry->sig = sig;
+ TP_STORE_SIGINFO(__entry, info);
+ memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+ __entry->pid = task->pid;
+ ),
+
+ TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d",
+ __entry->sig, __entry->errno, __entry->code,
+ __entry->comm, __entry->pid)
+);
+
+/**
+ * signal_deliver - called when a signal is delivered
+ * @sig: signal number
+ * @info: pointer to struct siginfo
+ * @ka: pointer to struct k_sigaction
+ *
+ * A 'sig' signal is delivered to the current process with 'info' siginfo,
+ * and it will be handled by 'ka'. ka->sa.sa_handler can be SIG_IGN or
+ * SIG_DFL.
+ * Note that some signals reported by the signal_generate tracepoint can be
+ * lost, ignored or modified (by a debugger) before hitting this tracepoint.
+ * This means the tracepoint shows which signals are actually delivered, but
+ * matching generated signals against delivered signals may not be reliable.
+ */
+TRACE_EVENT(signal_deliver,
+
+ TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka),
+
+ TP_ARGS(sig, info, ka),
+
+ TP_STRUCT__entry(
+ __field( int, sig )
+ __field( int, errno )
+ __field( int, code )
+ __field( unsigned long, sa_handler )
+ __field( unsigned long, sa_flags )
+ ),
+
+ TP_fast_assign(
+ __entry->sig = sig;
+ TP_STORE_SIGINFO(__entry, info);
+ __entry->sa_handler = (unsigned long)ka->sa.sa_handler;
+ __entry->sa_flags = ka->sa.sa_flags;
+ ),
+
+ TP_printk("sig=%d errno=%d code=%d sa_handler=%lx sa_flags=%lx",
+ __entry->sig, __entry->errno, __entry->code,
+ __entry->sa_handler, __entry->sa_flags)
+);
+
+DECLARE_EVENT_CLASS(signal_queue_overflow,
+
+ TP_PROTO(int sig, int group, struct siginfo *info),
+
+ TP_ARGS(sig, group, info),
+
+ TP_STRUCT__entry(
+ __field( int, sig )
+ __field( int, group )
+ __field( int, errno )
+ __field( int, code )
+ ),
+
+ TP_fast_assign(
+ __entry->sig = sig;
+ __entry->group = group;
+ TP_STORE_SIGINFO(__entry, info);
+ ),
+
+ TP_printk("sig=%d group=%d errno=%d code=%d",
+ __entry->sig, __entry->group, __entry->errno, __entry->code)
+);
+
+/**
+ * signal_overflow_fail - called when the signal queue overflows
+ * @sig: signal number
+ * @group: whether the signal is sent to a process group (bool)
+ * @info: pointer to struct siginfo
+ *
+ * The kernel fails to generate a 'sig' signal with 'info' siginfo because
+ * the siginfo queue has overflowed, and the signal is dropped.
+ * 'group' is not 0 if the signal will be sent to a process group.
+ * 'sig' is always one of the RT signals.
+ */
+DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
+
+ TP_PROTO(int sig, int group, struct siginfo *info),
+
+ TP_ARGS(sig, group, info)
+);
+
+/**
+ * signal_lose_info - called when siginfo is lost
+ * @sig: signal number
+ * @group: whether the signal is sent to a process group (bool)
+ * @info: pointer to struct siginfo
+ *
+ * The kernel generates a 'sig' signal but loses its 'info' siginfo because
+ * the siginfo queue has overflowed.
+ * 'group' is not 0 if the signal will be sent to a process group.
+ * 'sig' is always one of the non-RT signals.
+ */
+DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
+
+ TP_PROTO(int sig, int group, struct siginfo *info),
+
+ TP_ARGS(sig, group, info)
+);
+
+#endif /* _TRACE_SIGNAL_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
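Note on TP_STORE_SIGINFO above: it only dereferences 'info' once it has ruled out the sentinel cases, because SEND_SIG_NOINFO, SEND_SIG_PRIV and SEND_SIG_FORCED are small constants cast to struct siginfo *. A hedged userspace sketch of the same dispatch; the sentinel values 0/1/2 and SI_KERNEL = 0x80 match the usual kernel definitions but are assumptions here:

#include <stdio.h>

struct siginfo { int si_errno; int si_code; };

/* Sentinel "pointers", assumed 0/1/2 as in kernels of this era. */
#define SEND_SIG_NOINFO	((struct siginfo *)0)
#define SEND_SIG_PRIV	((struct siginfo *)1)
#define SEND_SIG_FORCED	((struct siginfo *)2)

#define SI_USER		0
#define SI_KERNEL	0x80

/* Mirror TP_STORE_SIGINFO: dereference info only when it is real. */
static void store_siginfo(const struct siginfo *info, int *err, int *code)
{
	if (info == SEND_SIG_NOINFO || info == SEND_SIG_FORCED) {
		*err = 0;
		*code = SI_USER;
	} else if (info == SEND_SIG_PRIV) {
		*err = 0;
		*code = SI_KERNEL;
	} else {
		*err = info->si_errno;
		*code = info->si_code;
	}
}

int main(void)
{
	struct siginfo real = { 0, -6 };	/* si_code -6 ~ SI_TKILL */
	int e, c;

	store_siginfo(SEND_SIG_PRIV, &e, &c);
	printf("errno=%d code=%#x\n", e, c);	/* errno=0 code=0x80 */
	store_siginfo(&real, &e, &c);
	printf("errno=%d code=%d\n", e, c);	/* errno=0 code=-6 */
	return 0;
}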
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
new file mode 100644
index 00000000..0c68ae22
--- /dev/null
+++ b/include/trace/events/skb.h
@@ -0,0 +1,75 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM skb
+
+#if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SKB_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/tracepoint.h>
+
+/*
+ * Tracepoint for freeing an sk_buff:
+ */
+TRACE_EVENT(kfree_skb,
+
+ TP_PROTO(struct sk_buff *skb, void *location),
+
+ TP_ARGS(skb, location),
+
+ TP_STRUCT__entry(
+ __field( void *, skbaddr )
+ __field( void *, location )
+ __field( unsigned short, protocol )
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+ __entry->location = location;
+ __entry->protocol = ntohs(skb->protocol);
+ ),
+
+ TP_printk("skbaddr=%p protocol=%u location=%p",
+ __entry->skbaddr, __entry->protocol, __entry->location)
+);
+
+TRACE_EVENT(consume_skb,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field( void *, skbaddr )
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+ ),
+
+ TP_printk("skbaddr=%p", __entry->skbaddr)
+);
+
+TRACE_EVENT(skb_copy_datagram_iovec,
+
+ TP_PROTO(const struct sk_buff *skb, int len),
+
+ TP_ARGS(skb, len),
+
+ TP_STRUCT__entry(
+ __field( const void *, skbaddr )
+ __field( int, len )
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+ __entry->len = len;
+ ),
+
+ TP_printk("skbaddr=%p len=%d", __entry->skbaddr, __entry->len)
+);
+
+#endif /* _TRACE_SKB_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
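Note on kfree_skb above: it runs skb->protocol through ntohs() because the field is kept in network byte order. A two-line demonstration (ETH_P_IP, i.e. IPv4, is 0x0800; on a little-endian host the raw value prints as 0x0008):

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned short wire = htons(0x0800);	/* as stored in skb->protocol */

	printf("raw=%#06x decoded=%#06x\n",
	       (unsigned int)wire, (unsigned int)ntohs(wire));
	return 0;
}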
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
new file mode 100644
index 00000000..5a4c04a7
--- /dev/null
+++ b/include/trace/events/syscalls.h
@@ -0,0 +1,75 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM raw_syscalls
+#define TRACE_INCLUDE_FILE syscalls
+
+#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EVENTS_SYSCALLS_H
+
+#include <linux/tracepoint.h>
+
+#include <asm/ptrace.h>
+#include <asm/syscall.h>
+
+
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+
+extern void syscall_regfunc(void);
+extern void syscall_unregfunc(void);
+
+TRACE_EVENT_FN(sys_enter,
+
+ TP_PROTO(struct pt_regs *regs, long id),
+
+ TP_ARGS(regs, id),
+
+ TP_STRUCT__entry(
+ __field( long, id )
+ __array( unsigned long, args, 6 )
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ syscall_get_arguments(current, regs, 0, 6, __entry->args);
+ ),
+
+ TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
+ __entry->id,
+ __entry->args[0], __entry->args[1], __entry->args[2],
+ __entry->args[3], __entry->args[4], __entry->args[5]),
+
+ syscall_regfunc, syscall_unregfunc
+);
+
+TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY)
+
+TRACE_EVENT_FN(sys_exit,
+
+ TP_PROTO(struct pt_regs *regs, long ret),
+
+ TP_ARGS(regs, ret),
+
+ TP_STRUCT__entry(
+ __field( long, id )
+ __field( long, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->id = syscall_get_nr(current, regs);
+ __entry->ret = ret;
+ ),
+
+ TP_printk("NR %ld = %ld",
+ __entry->id, __entry->ret),
+
+ syscall_regfunc, syscall_unregfunc
+);
+
+TRACE_EVENT_FLAGS(sys_exit, TRACE_EVENT_FL_CAP_ANY)
+
+#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
+
+#endif /* _TRACE_EVENTS_SYSCALLS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
new file mode 100644
index 00000000..425bcfe5
--- /dev/null
+++ b/include/trace/events/timer.h
@@ -0,0 +1,329 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM timer
+
+#if !defined(_TRACE_TIMER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TIMER_H
+
+#include <linux/tracepoint.h>
+#include <linux/hrtimer.h>
+#include <linux/timer.h>
+
+DECLARE_EVENT_CLASS(timer_class,
+
+ TP_PROTO(struct timer_list *timer),
+
+ TP_ARGS(timer),
+
+ TP_STRUCT__entry(
+ __field( void *, timer )
+ ),
+
+ TP_fast_assign(
+ __entry->timer = timer;
+ ),
+
+ TP_printk("timer=%p", __entry->timer)
+);
+
+/**
+ * timer_init - called when the timer is initialized
+ * @timer: pointer to struct timer_list
+ */
+DEFINE_EVENT(timer_class, timer_init,
+
+ TP_PROTO(struct timer_list *timer),
+
+ TP_ARGS(timer)
+);
+
+/**
+ * timer_start - called when the timer is started
+ * @timer: pointer to struct timer_list
+ * @expires: the timer's expiry time
+ */
+TRACE_EVENT(timer_start,
+
+ TP_PROTO(struct timer_list *timer, unsigned long expires),
+
+ TP_ARGS(timer, expires),
+
+ TP_STRUCT__entry(
+ __field( void *, timer )
+ __field( void *, function )
+ __field( unsigned long, expires )
+ __field( unsigned long, now )
+ ),
+
+ TP_fast_assign(
+ __entry->timer = timer;
+ __entry->function = timer->function;
+ __entry->expires = expires;
+ __entry->now = jiffies;
+ ),
+
+ TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
+ __entry->timer, __entry->function, __entry->expires,
+ (long)__entry->expires - __entry->now)
+);
+
+/**
+ * timer_expire_entry - called immediately before the timer callback
+ * @timer: pointer to struct timer_list
+ *
+ * Allows the timer latency to be determined.
+ */
+TRACE_EVENT(timer_expire_entry,
+
+ TP_PROTO(struct timer_list *timer),
+
+ TP_ARGS(timer),
+
+ TP_STRUCT__entry(
+ __field( void *, timer )
+ __field( unsigned long, now )
+ __field( void *, function)
+ ),
+
+ TP_fast_assign(
+ __entry->timer = timer;
+ __entry->now = jiffies;
+ __entry->function = timer->function;
+ ),
+
+	TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function, __entry->now)
+);
+
+/**
+ * timer_expire_exit - called immediately after the timer callback returns
+ * @timer: pointer to struct timer_list
+ *
+ * When used in combination with the timer_expire_entry tracepoint we can
+ * determine the runtime of the timer callback function.
+ *
+ * NOTE: Do NOT dereference timer in TP_fast_assign; the pointer might
+ * already be invalid. We track only the pointer value.
+ */
+DEFINE_EVENT(timer_class, timer_expire_exit,
+
+ TP_PROTO(struct timer_list *timer),
+
+ TP_ARGS(timer)
+);
+
+/**
+ * timer_cancel - called when the timer is canceled
+ * @timer: pointer to struct timer_list
+ */
+DEFINE_EVENT(timer_class, timer_cancel,
+
+ TP_PROTO(struct timer_list *timer),
+
+ TP_ARGS(timer)
+);
+
+/**
+ * hrtimer_init - called when the hrtimer is initialized
+ * @timer: pointer to struct hrtimer
+ * @clockid: the hrtimer's clock
+ * @mode: the hrtimer's mode
+ */
+TRACE_EVENT(hrtimer_init,
+
+ TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid,
+ enum hrtimer_mode mode),
+
+ TP_ARGS(hrtimer, clockid, mode),
+
+ TP_STRUCT__entry(
+ __field( void *, hrtimer )
+ __field( clockid_t, clockid )
+ __field( enum hrtimer_mode, mode )
+ ),
+
+ TP_fast_assign(
+ __entry->hrtimer = hrtimer;
+ __entry->clockid = clockid;
+ __entry->mode = mode;
+ ),
+
+ TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
+ __entry->clockid == CLOCK_REALTIME ?
+ "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
+ __entry->mode == HRTIMER_MODE_ABS ?
+ "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
+);
+
+/**
+ * hrtimer_start - called when the hrtimer is started
+ * @timer: pointer to struct hrtimer
+ */
+TRACE_EVENT(hrtimer_start,
+
+ TP_PROTO(struct hrtimer *hrtimer),
+
+ TP_ARGS(hrtimer),
+
+ TP_STRUCT__entry(
+ __field( void *, hrtimer )
+ __field( void *, function )
+ __field( s64, expires )
+ __field( s64, softexpires )
+ ),
+
+ TP_fast_assign(
+ __entry->hrtimer = hrtimer;
+ __entry->function = hrtimer->function;
+ __entry->expires = hrtimer_get_expires(hrtimer).tv64;
+ __entry->softexpires = hrtimer_get_softexpires(hrtimer).tv64;
+ ),
+
+ TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
+ __entry->hrtimer, __entry->function,
+ (unsigned long long)ktime_to_ns((ktime_t) {
+ .tv64 = __entry->expires }),
+ (unsigned long long)ktime_to_ns((ktime_t) {
+ .tv64 = __entry->softexpires }))
+);
+
+/**
+ * hrtimer_expire_entry - called immediately before the hrtimer callback
+ * @timer: pointer to struct hrtimer
+ * @now: pointer to a variable holding the current time of the
+ *	timer's base.
+ *
+ * Allows the hrtimer latency to be determined.
+ */
+TRACE_EVENT(hrtimer_expire_entry,
+
+ TP_PROTO(struct hrtimer *hrtimer, ktime_t *now),
+
+ TP_ARGS(hrtimer, now),
+
+ TP_STRUCT__entry(
+ __field( void *, hrtimer )
+ __field( s64, now )
+ __field( void *, function)
+ ),
+
+ TP_fast_assign(
+ __entry->hrtimer = hrtimer;
+ __entry->now = now->tv64;
+ __entry->function = hrtimer->function;
+ ),
+
+ TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
+ (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
+);
+
+DECLARE_EVENT_CLASS(hrtimer_class,
+
+ TP_PROTO(struct hrtimer *hrtimer),
+
+ TP_ARGS(hrtimer),
+
+ TP_STRUCT__entry(
+ __field( void *, hrtimer )
+ ),
+
+ TP_fast_assign(
+ __entry->hrtimer = hrtimer;
+ ),
+
+ TP_printk("hrtimer=%p", __entry->hrtimer)
+);
+
+/**
+ * hrtimer_expire_exit - called immediately after the hrtimer callback returns
+ * @timer: pointer to struct hrtimer
+ *
+ * When used in combination with the hrtimer_expire_entry tracepoint we can
+ * determine the runtime of the callback function.
+ */
+DEFINE_EVENT(hrtimer_class, hrtimer_expire_exit,
+
+ TP_PROTO(struct hrtimer *hrtimer),
+
+ TP_ARGS(hrtimer)
+);
+
+/**
+ * hrtimer_cancel - called when the hrtimer is canceled
+ * @hrtimer: pointer to struct hrtimer
+ */
+DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
+
+ TP_PROTO(struct hrtimer *hrtimer),
+
+ TP_ARGS(hrtimer)
+);
+
+/**
+ * itimer_state - called when itimer is started or canceled
+ * @which: name of the interval timer
+ * @value: the itimer's value; the itimer is canceled if value->it_value is
+ *	zero, otherwise it is started
+ * @expires: the itimer's expiry time
+ */
+TRACE_EVENT(itimer_state,
+
+ TP_PROTO(int which, const struct itimerval *const value,
+ cputime_t expires),
+
+ TP_ARGS(which, value, expires),
+
+ TP_STRUCT__entry(
+ __field( int, which )
+ __field( cputime_t, expires )
+ __field( long, value_sec )
+ __field( long, value_usec )
+ __field( long, interval_sec )
+ __field( long, interval_usec )
+ ),
+
+ TP_fast_assign(
+ __entry->which = which;
+ __entry->expires = expires;
+ __entry->value_sec = value->it_value.tv_sec;
+ __entry->value_usec = value->it_value.tv_usec;
+ __entry->interval_sec = value->it_interval.tv_sec;
+ __entry->interval_usec = value->it_interval.tv_usec;
+ ),
+
+ TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld",
+ __entry->which, (unsigned long long)__entry->expires,
+ __entry->value_sec, __entry->value_usec,
+ __entry->interval_sec, __entry->interval_usec)
+);
+
+/**
+ * itimer_expire - called when itimer expires
+ * @which: type of the interval timer
+ * @pid: pid of the process which owns the timer
+ * @now: current time, used to calculate the latency of itimer
+ */
+TRACE_EVENT(itimer_expire,
+
+ TP_PROTO(int which, struct pid *pid, cputime_t now),
+
+ TP_ARGS(which, pid, now),
+
+ TP_STRUCT__entry(
+ __field( int , which )
+ __field( pid_t, pid )
+ __field( cputime_t, now )
+ ),
+
+ TP_fast_assign(
+ __entry->which = which;
+ __entry->now = now;
+ __entry->pid = pid_nr(pid);
+ ),
+
+ TP_printk("which=%d pid=%d now=%llu", __entry->which,
+ (int) __entry->pid, (unsigned long long)__entry->now)
+);
+
+#endif /* _TRACE_TIMER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
new file mode 100644
index 00000000..b2c33bd9
--- /dev/null
+++ b/include/trace/events/vmscan.h
@@ -0,0 +1,400 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vmscan
+
+#if !defined(_TRACE_VMSCAN_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VMSCAN_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/mm.h>
+#include <linux/memcontrol.h>
+#include "gfpflags.h"
+
+#define RECLAIM_WB_ANON 0x0001u
+#define RECLAIM_WB_FILE 0x0002u
+#define RECLAIM_WB_MIXED 0x0010u
+#define RECLAIM_WB_SYNC 0x0004u
+#define RECLAIM_WB_ASYNC 0x0008u
+
+#define show_reclaim_flags(flags) \
+ (flags) ? __print_flags(flags, "|", \
+ {RECLAIM_WB_ANON, "RECLAIM_WB_ANON"}, \
+ {RECLAIM_WB_FILE, "RECLAIM_WB_FILE"}, \
+ {RECLAIM_WB_MIXED, "RECLAIM_WB_MIXED"}, \
+ {RECLAIM_WB_SYNC, "RECLAIM_WB_SYNC"}, \
+ {RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \
+ ) : "RECLAIM_WB_NONE"
+
+#define trace_reclaim_flags(page, sync) ( \
+ (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
+ (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+ )
+
+#define trace_shrink_flags(file, sync) ( \
+ (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_MIXED : \
+ (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \
+ (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+ )
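+
+/*
+ * Minimal usage sketch (assuming a struct scan_control *sc with a
+ * reclaim_mode field, as in mm/vmscan.c): the reclaim path passes the
+ * encoded flags straight into the writepage event, e.g.
+ *
+ *	trace_mm_vmscan_writepage(page,
+ *				  trace_reclaim_flags(page, sc->reclaim_mode));
+ */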
+
+TRACE_EVENT(mm_vmscan_kswapd_sleep,
+
+ TP_PROTO(int nid),
+
+ TP_ARGS(nid),
+
+ TP_STRUCT__entry(
+ __field( int, nid )
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ ),
+
+ TP_printk("nid=%d", __entry->nid)
+);
+
+TRACE_EVENT(mm_vmscan_kswapd_wake,
+
+ TP_PROTO(int nid, int order),
+
+ TP_ARGS(nid, order),
+
+ TP_STRUCT__entry(
+ __field( int, nid )
+ __field( int, order )
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->order = order;
+ ),
+
+ TP_printk("nid=%d order=%d", __entry->nid, __entry->order)
+);
+
+TRACE_EVENT(mm_vmscan_wakeup_kswapd,
+
+ TP_PROTO(int nid, int zid, int order),
+
+ TP_ARGS(nid, zid, order),
+
+ TP_STRUCT__entry(
+ __field( int, nid )
+ __field( int, zid )
+ __field( int, order )
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->zid = zid;
+ __entry->order = order;
+ ),
+
+ TP_printk("nid=%d zid=%d order=%d",
+ __entry->nid,
+ __entry->zid,
+ __entry->order)
+);
+
+DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
+
+ TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
+
+ TP_ARGS(order, may_writepage, gfp_flags),
+
+ TP_STRUCT__entry(
+ __field( int, order )
+ __field( int, may_writepage )
+ __field( gfp_t, gfp_flags )
+ ),
+
+ TP_fast_assign(
+ __entry->order = order;
+ __entry->may_writepage = may_writepage;
+ __entry->gfp_flags = gfp_flags;
+ ),
+
+ TP_printk("order=%d may_writepage=%d gfp_flags=%s",
+ __entry->order,
+ __entry->may_writepage,
+ show_gfp_flags(__entry->gfp_flags))
+);
+
+DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_begin,
+
+ TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
+
+ TP_ARGS(order, may_writepage, gfp_flags)
+);
+
+DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
+
+ TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
+
+ TP_ARGS(order, may_writepage, gfp_flags)
+);
+
+DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_reclaim_begin,
+
+ TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
+
+ TP_ARGS(order, may_writepage, gfp_flags)
+);
+
+DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template,
+
+ TP_PROTO(unsigned long nr_reclaimed),
+
+ TP_ARGS(nr_reclaimed),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, nr_reclaimed )
+ ),
+
+ TP_fast_assign(
+ __entry->nr_reclaimed = nr_reclaimed;
+ ),
+
+ TP_printk("nr_reclaimed=%lu", __entry->nr_reclaimed)
+);
+
+DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_direct_reclaim_end,
+
+ TP_PROTO(unsigned long nr_reclaimed),
+
+ TP_ARGS(nr_reclaimed)
+);
+
+DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end,
+
+ TP_PROTO(unsigned long nr_reclaimed),
+
+ TP_ARGS(nr_reclaimed)
+);
+
+DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_reclaim_end,
+
+ TP_PROTO(unsigned long nr_reclaimed),
+
+ TP_ARGS(nr_reclaimed)
+);
+
+
+DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
+
+ TP_PROTO(int order,
+ unsigned long nr_requested,
+ unsigned long nr_scanned,
+ unsigned long nr_taken,
+ unsigned long nr_lumpy_taken,
+ unsigned long nr_lumpy_dirty,
+ unsigned long nr_lumpy_failed,
+ int isolate_mode),
+
+ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode),
+
+ TP_STRUCT__entry(
+ __field(int, order)
+ __field(unsigned long, nr_requested)
+ __field(unsigned long, nr_scanned)
+ __field(unsigned long, nr_taken)
+ __field(unsigned long, nr_lumpy_taken)
+ __field(unsigned long, nr_lumpy_dirty)
+ __field(unsigned long, nr_lumpy_failed)
+ __field(int, isolate_mode)
+ ),
+
+ TP_fast_assign(
+ __entry->order = order;
+ __entry->nr_requested = nr_requested;
+ __entry->nr_scanned = nr_scanned;
+ __entry->nr_taken = nr_taken;
+ __entry->nr_lumpy_taken = nr_lumpy_taken;
+ __entry->nr_lumpy_dirty = nr_lumpy_dirty;
+ __entry->nr_lumpy_failed = nr_lumpy_failed;
+ __entry->isolate_mode = isolate_mode;
+ ),
+
+ TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu contig_taken=%lu contig_dirty=%lu contig_failed=%lu",
+ __entry->isolate_mode,
+ __entry->order,
+ __entry->nr_requested,
+ __entry->nr_scanned,
+ __entry->nr_taken,
+ __entry->nr_lumpy_taken,
+ __entry->nr_lumpy_dirty,
+ __entry->nr_lumpy_failed)
+);
+
+DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,
+
+ TP_PROTO(int order,
+ unsigned long nr_requested,
+ unsigned long nr_scanned,
+ unsigned long nr_taken,
+ unsigned long nr_lumpy_taken,
+ unsigned long nr_lumpy_dirty,
+ unsigned long nr_lumpy_failed,
+ int isolate_mode),
+
+ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode)
+
+);
+
+DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate,
+
+ TP_PROTO(int order,
+ unsigned long nr_requested,
+ unsigned long nr_scanned,
+ unsigned long nr_taken,
+ unsigned long nr_lumpy_taken,
+ unsigned long nr_lumpy_dirty,
+ unsigned long nr_lumpy_failed,
+ int isolate_mode),
+
+ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode)
+
+);
+
+TRACE_EVENT(mm_vmscan_writepage,
+
+ TP_PROTO(struct page *page,
+ int reclaim_flags),
+
+ TP_ARGS(page, reclaim_flags),
+
+ TP_STRUCT__entry(
+ __field(struct page *, page)
+ __field(int, reclaim_flags)
+ ),
+
+ TP_fast_assign(
+ __entry->page = page;
+ __entry->reclaim_flags = reclaim_flags;
+ ),
+
+ TP_printk("page=%p pfn=%lu flags=%s",
+ __entry->page,
+ page_to_pfn(__entry->page),
+ show_reclaim_flags(__entry->reclaim_flags))
+);
+
+TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
+
+ TP_PROTO(int nid, int zid,
+ unsigned long nr_scanned, unsigned long nr_reclaimed,
+ int priority, int reclaim_flags),
+
+ TP_ARGS(nid, zid, nr_scanned, nr_reclaimed, priority, reclaim_flags),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(int, zid)
+ __field(unsigned long, nr_scanned)
+ __field(unsigned long, nr_reclaimed)
+ __field(int, priority)
+ __field(int, reclaim_flags)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->zid = zid;
+ __entry->nr_scanned = nr_scanned;
+ __entry->nr_reclaimed = nr_reclaimed;
+ __entry->priority = priority;
+ __entry->reclaim_flags = reclaim_flags;
+ ),
+
+ TP_printk("nid=%d zid=%d nr_scanned=%ld nr_reclaimed=%ld priority=%d flags=%s",
+ __entry->nid, __entry->zid,
+ __entry->nr_scanned, __entry->nr_reclaimed,
+ __entry->priority,
+ show_reclaim_flags(__entry->reclaim_flags))
+);
+
+TRACE_EVENT(replace_swap_token,
+ TP_PROTO(struct mm_struct *old_mm,
+ struct mm_struct *new_mm),
+
+ TP_ARGS(old_mm, new_mm),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct*, old_mm)
+ __field(unsigned int, old_prio)
+ __field(struct mm_struct*, new_mm)
+ __field(unsigned int, new_prio)
+ ),
+
+ TP_fast_assign(
+ __entry->old_mm = old_mm;
+ __entry->old_prio = old_mm ? old_mm->token_priority : 0;
+ __entry->new_mm = new_mm;
+ __entry->new_prio = new_mm->token_priority;
+ ),
+
+ TP_printk("old_token_mm=%p old_prio=%u new_token_mm=%p new_prio=%u",
+ __entry->old_mm, __entry->old_prio,
+ __entry->new_mm, __entry->new_prio)
+);
+
+DECLARE_EVENT_CLASS(put_swap_token_template,
+ TP_PROTO(struct mm_struct *swap_token_mm),
+
+ TP_ARGS(swap_token_mm),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct*, swap_token_mm)
+ ),
+
+ TP_fast_assign(
+ __entry->swap_token_mm = swap_token_mm;
+ ),
+
+ TP_printk("token_mm=%p", __entry->swap_token_mm)
+);
+
+DEFINE_EVENT(put_swap_token_template, put_swap_token,
+ TP_PROTO(struct mm_struct *swap_token_mm),
+ TP_ARGS(swap_token_mm)
+);
+
+DEFINE_EVENT_CONDITION(put_swap_token_template, disable_swap_token,
+ TP_PROTO(struct mm_struct *swap_token_mm),
+ TP_ARGS(swap_token_mm),
+ TP_CONDITION(swap_token_mm != NULL)
+);
+
+TRACE_EVENT_CONDITION(update_swap_token_priority,
+ TP_PROTO(struct mm_struct *mm,
+ unsigned int old_prio,
+ struct mm_struct *swap_token_mm),
+
+ TP_ARGS(mm, old_prio, swap_token_mm),
+
+ TP_CONDITION(mm->token_priority != old_prio),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct*, mm)
+ __field(unsigned int, old_prio)
+ __field(unsigned int, new_prio)
+ __field(struct mm_struct*, swap_token_mm)
+ __field(unsigned int, swap_token_prio)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __entry->old_prio = old_prio;
+ __entry->new_prio = mm->token_priority;
+ __entry->swap_token_mm = swap_token_mm;
+ __entry->swap_token_prio = swap_token_mm ? swap_token_mm->token_priority : 0;
+ ),
+
+ TP_printk("mm=%p old_prio=%u new_prio=%u swap_token_mm=%p token_prio=%u",
+ __entry->mm, __entry->old_prio, __entry->new_prio,
+ __entry->swap_token_mm, __entry->swap_token_prio)
+);
+
+#endif /* _TRACE_VMSCAN_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
new file mode 100644
index 00000000..7d497291
--- /dev/null
+++ b/include/trace/events/workqueue.h
@@ -0,0 +1,121 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM workqueue
+
+#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WORKQUEUE_H
+
+#include <linux/tracepoint.h>
+#include <linux/workqueue.h>
+
+DECLARE_EVENT_CLASS(workqueue_work,
+
+ TP_PROTO(struct work_struct *work),
+
+ TP_ARGS(work),
+
+ TP_STRUCT__entry(
+ __field( void *, work )
+ ),
+
+ TP_fast_assign(
+ __entry->work = work;
+ ),
+
+ TP_printk("work struct %p", __entry->work)
+);
+
+/**
+ * workqueue_queue_work - called when a work gets queued
+ * @req_cpu: the requested cpu
+ * @cwq: pointer to struct cpu_workqueue_struct
+ * @work: pointer to struct work_struct
+ *
+ * This event occurs when a work is queued immediately or once a
+ * delayed work is actually queued on a workqueue (i.e., once the
+ * delay has been reached).
+ */
+TRACE_EVENT(workqueue_queue_work,
+
+ TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq,
+ struct work_struct *work),
+
+ TP_ARGS(req_cpu, cwq, work),
+
+ TP_STRUCT__entry(
+ __field( void *, work )
+ __field( void *, function)
+ __field( void *, workqueue)
+ __field( unsigned int, req_cpu )
+ __field( unsigned int, cpu )
+ ),
+
+ TP_fast_assign(
+ __entry->work = work;
+ __entry->function = work->func;
+ __entry->workqueue = cwq->wq;
+ __entry->req_cpu = req_cpu;
+ __entry->cpu = cwq->gcwq->cpu;
+ ),
+
+ TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
+ __entry->work, __entry->function, __entry->workqueue,
+ __entry->req_cpu, __entry->cpu)
+);
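+
+/*
+ * Usage sketch: assuming debugfs is mounted at /sys/kernel/debug, the
+ * workqueue events can be enabled and read back at runtime:
+ *
+ *	# echo 1 > /sys/kernel/debug/tracing/events/workqueue/enable
+ *	# cat /sys/kernel/debug/tracing/trace
+ */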
+
+/**
+ * workqueue_activate_work - called when a work gets activated
+ * @work: pointer to struct work_struct
+ *
+ * This event occurs when a queued work is put on the active queue,
+ * which happens immediately after queueing unless @max_active limit
+ * is reached.
+ */
+DEFINE_EVENT(workqueue_work, workqueue_activate_work,
+
+ TP_PROTO(struct work_struct *work),
+
+ TP_ARGS(work)
+);
+
+/**
+ * workqueue_execute_start - called immediately before the workqueue callback
+ * @work: pointer to struct work_struct
+ *
+ * Allows workqueue execution to be tracked.
+ */
+TRACE_EVENT(workqueue_execute_start,
+
+ TP_PROTO(struct work_struct *work),
+
+ TP_ARGS(work),
+
+ TP_STRUCT__entry(
+ __field( void *, work )
+ __field( void *, function)
+ ),
+
+ TP_fast_assign(
+ __entry->work = work;
+ __entry->function = work->func;
+ ),
+
+ TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
+);
+
+/**
+ * workqueue_execute_end - called immediately after the workqueue callback
+ * @work: pointer to struct work_struct
+ *
+ * Allows workqueue execution to be tracked.
+ */
+DEFINE_EVENT(workqueue_work, workqueue_execute_end,
+
+ TP_PROTO(struct work_struct *work),
+
+ TP_ARGS(work)
+);
+
+#endif /* _TRACE_WORKQUEUE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
new file mode 100644
index 00000000..9b60c6fc
--- /dev/null
+++ b/include/trace/events/writeback.h
@@ -0,0 +1,196 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM writeback
+
+#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WRITEBACK_H
+
+#include <linux/backing-dev.h>
+#include <linux/device.h>
+#include <linux/writeback.h>
+
+struct wb_writeback_work;
+
+DECLARE_EVENT_CLASS(writeback_work_class,
+ TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
+ TP_ARGS(bdi, work),
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(long, nr_pages)
+ __field(dev_t, sb_dev)
+ __field(int, sync_mode)
+ __field(int, for_kupdate)
+ __field(int, range_cyclic)
+ __field(int, for_background)
+ ),
+ TP_fast_assign(
+ struct device *dev = bdi->dev;
+ if (!dev)
+ dev = default_backing_dev_info.dev;
+ strncpy(__entry->name, dev_name(dev), 32);
+ __entry->nr_pages = work->nr_pages;
+ __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
+ __entry->sync_mode = work->sync_mode;
+ __entry->for_kupdate = work->for_kupdate;
+ __entry->range_cyclic = work->range_cyclic;
+ __entry->for_background = work->for_background;
+ ),
+ TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
+ "kupdate=%d range_cyclic=%d background=%d",
+ __entry->name,
+ MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
+ __entry->nr_pages,
+ __entry->sync_mode,
+ __entry->for_kupdate,
+ __entry->range_cyclic,
+ __entry->for_background
+ )
+);
+#define DEFINE_WRITEBACK_WORK_EVENT(name) \
+DEFINE_EVENT(writeback_work_class, name, \
+ TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
+ TP_ARGS(bdi, work))
+DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
+
+TRACE_EVENT(writeback_pages_written,
+ TP_PROTO(long pages_written),
+ TP_ARGS(pages_written),
+ TP_STRUCT__entry(
+ __field(long, pages)
+ ),
+ TP_fast_assign(
+ __entry->pages = pages_written;
+ ),
+ TP_printk("%ld", __entry->pages)
+);
+
+DECLARE_EVENT_CLASS(writeback_class,
+ TP_PROTO(struct backing_dev_info *bdi),
+ TP_ARGS(bdi),
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ ),
+ TP_fast_assign(
+ strncpy(__entry->name, dev_name(bdi->dev), 32);
+ ),
+ TP_printk("bdi %s",
+ __entry->name
+ )
+);
+#define DEFINE_WRITEBACK_EVENT(name) \
+DEFINE_EVENT(writeback_class, name, \
+ TP_PROTO(struct backing_dev_info *bdi), \
+ TP_ARGS(bdi))
+
+DEFINE_WRITEBACK_EVENT(writeback_nowork);
+DEFINE_WRITEBACK_EVENT(writeback_wake_background);
+DEFINE_WRITEBACK_EVENT(writeback_wake_thread);
+DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread);
+DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
+DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
+DEFINE_WRITEBACK_EVENT(writeback_thread_start);
+DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
+
+DECLARE_EVENT_CLASS(wbc_class,
+ TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
+ TP_ARGS(wbc, bdi),
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(long, nr_to_write)
+ __field(long, pages_skipped)
+ __field(int, sync_mode)
+ __field(int, for_kupdate)
+ __field(int, for_background)
+ __field(int, for_reclaim)
+ __field(int, range_cyclic)
+ __field(int, more_io)
+ __field(unsigned long, older_than_this)
+ __field(long, range_start)
+ __field(long, range_end)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, dev_name(bdi->dev), 32);
+ __entry->nr_to_write = wbc->nr_to_write;
+ __entry->pages_skipped = wbc->pages_skipped;
+ __entry->sync_mode = wbc->sync_mode;
+ __entry->for_kupdate = wbc->for_kupdate;
+ __entry->for_background = wbc->for_background;
+ __entry->for_reclaim = wbc->for_reclaim;
+ __entry->range_cyclic = wbc->range_cyclic;
+ __entry->more_io = wbc->more_io;
+ __entry->older_than_this = wbc->older_than_this ?
+ *wbc->older_than_this : 0;
+ __entry->range_start = (long)wbc->range_start;
+ __entry->range_end = (long)wbc->range_end;
+ ),
+
+ TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
+ "bgrd=%d reclm=%d cyclic=%d more=%d older=0x%lx "
+ "start=0x%lx end=0x%lx",
+ __entry->name,
+ __entry->nr_to_write,
+ __entry->pages_skipped,
+ __entry->sync_mode,
+ __entry->for_kupdate,
+ __entry->for_background,
+ __entry->for_reclaim,
+ __entry->range_cyclic,
+ __entry->more_io,
+ __entry->older_than_this,
+ __entry->range_start,
+ __entry->range_end)
+);
+
+#define DEFINE_WBC_EVENT(name) \
+DEFINE_EVENT(wbc_class, name, \
+ TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
+ TP_ARGS(wbc, bdi))
+DEFINE_WBC_EVENT(wbc_writeback_start);
+DEFINE_WBC_EVENT(wbc_writeback_written);
+DEFINE_WBC_EVENT(wbc_writeback_wait);
+DEFINE_WBC_EVENT(wbc_balance_dirty_start);
+DEFINE_WBC_EVENT(wbc_balance_dirty_written);
+DEFINE_WBC_EVENT(wbc_balance_dirty_wait);
+DEFINE_WBC_EVENT(wbc_writepage);
+
+DECLARE_EVENT_CLASS(writeback_congest_waited_template,
+
+ TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+ TP_ARGS(usec_timeout, usec_delayed),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, usec_timeout )
+ __field( unsigned int, usec_delayed )
+ ),
+
+ TP_fast_assign(
+ __entry->usec_timeout = usec_timeout;
+ __entry->usec_delayed = usec_delayed;
+ ),
+
+ TP_printk("usec_timeout=%u usec_delayed=%u",
+ __entry->usec_timeout,
+ __entry->usec_delayed)
+);
+
+DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
+
+ TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+ TP_ARGS(usec_timeout, usec_delayed)
+);
+
+DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
+
+ TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
+
+ TP_ARGS(usec_timeout, usec_delayed)
+);
+
+#endif /* _TRACE_WRITEBACK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
new file mode 100644
index 00000000..533c49f4
--- /dev/null
+++ b/include/trace/ftrace.h
@@ -0,0 +1,775 @@
+/*
+ * Stage 1 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * struct ftrace_raw_<call> {
+ * struct trace_entry ent;
+ * <type> <item>;
+ * <type2> <item2>[<len>];
+ * [...]
+ * };
+ *
+ * The <type> <item> is created by the __field(type, item) macro or
+ * the __array(type2, item2, len) macro.
+ * We simply do "type item;", and that will create the fields
+ * in the structure.
+ */
+
+#include <linux/ftrace_event.h>
+
+/*
+ * DECLARE_EVENT_CLASS can be used to add a generic function
+ * handler for events; that is, for cases where all events have the
+ * same parameters and just have distinct tracepoints.
+ * Each tracepoint can be defined with DEFINE_EVENT and that
+ * will map the DECLARE_EVENT_CLASS to the tracepoint.
+ *
+ * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
+ */
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
+ DECLARE_EVENT_CLASS(name, \
+ PARAMS(proto), \
+ PARAMS(args), \
+ PARAMS(tstruct), \
+ PARAMS(assign), \
+ PARAMS(print)); \
+ DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
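+
+/*
+ * For example (hypothetical event name), writing
+ *
+ *	TRACE_EVENT(foo, TP_PROTO(int bar), TP_ARGS(bar), ...)
+ *
+ * is equivalent to
+ *
+ *	DECLARE_EVENT_CLASS(foo, TP_PROTO(int bar), TP_ARGS(bar), ...);
+ *	DEFINE_EVENT(foo, foo, TP_PROTO(int bar), TP_ARGS(bar));
+ */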
+
+
+#undef __field
+#define __field(type, item) type item;
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type) type item;
+
+#undef __array
+#define __array(type, item, len) type item[len];
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) u32 __data_loc_##item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef TP_STRUCT__entry
+#define TP_STRUCT__entry(args...) args
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
+ struct ftrace_raw_##name { \
+ struct trace_entry ent; \
+ tstruct \
+ char __data[0]; \
+ }; \
+ \
+ static struct ftrace_event_class event_class_##name;
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args) \
+ static struct ftrace_event_call __used \
+ __attribute__((__aligned__(4))) event_##name
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
+ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+/* Callbacks are meaningless to ftrace. */
+#undef TRACE_EVENT_FN
+#define TRACE_EVENT_FN(name, proto, args, tstruct, \
+ assign, print, reg, unreg) \
+ TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
+ PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
+
+#undef TRACE_EVENT_FLAGS
+#define TRACE_EVENT_FLAGS(name, value) \
+ __TRACE_EVENT_FLAGS(name, value)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+
+/*
+ * Stage 2 of the trace events.
+ *
+ * Include the following:
+ *
+ * struct ftrace_data_offsets_<call> {
+ * u32 <item1>;
+ * u32 <item2>;
+ * [...]
+ * };
+ *
+ * The __dynamic_array() macro will create each u32 <item>; this is
+ * used to keep the offset of each array from the beginning of the event.
+ * The size of an array is also encoded, in the upper 16 bits of <item>.
+ */
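+
+/*
+ * For example (illustrative values): a __data_loc_<item> of 0x000a0020
+ * describes a 10-byte (0x000a) array placed 0x20 bytes from the start
+ * of the entry, matching the __get_dynamic_array() and __dynamic_array()
+ * definitions later in this file:
+ *
+ *	offset = __entry->__data_loc_item & 0xffff;
+ *	length = __entry->__data_loc_item >> 16;
+ */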
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) u32 item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ struct ftrace_data_offsets_##call { \
+ tstruct; \
+ };
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
+ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#undef TRACE_EVENT_FLAGS
+#define TRACE_EVENT_FLAGS(event, flag)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/*
+ * Stage 3 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * enum print_line_t
+ * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
+ * {
+ * struct trace_seq *s = &iter->seq;
+ * struct ftrace_raw_<call> *field; <-- defined in stage 1
+ * struct trace_entry *entry;
+ * struct trace_seq *p = &iter->tmp_seq;
+ * int ret;
+ *
+ * entry = iter->ent;
+ *
+ * if (entry->type != event_<call>->event.type) {
+ * WARN_ON_ONCE(1);
+ * return TRACE_TYPE_UNHANDLED;
+ * }
+ *
+ * field = (typeof(field))entry;
+ *
+ * trace_seq_init(p);
+ * ret = trace_seq_printf(s, "%s: ", <call>);
+ * if (ret)
+ * ret = trace_seq_printf(s, <TP_printk> "\n");
+ * if (!ret)
+ * return TRACE_TYPE_PARTIAL_LINE;
+ *
+ * return TRACE_TYPE_HANDLED;
+ * }
+ *
+ * This is the method used to print the raw event to the trace
+ * output format. Note, this is not needed if the data is read
+ * in binary.
+ */
+
+#undef __entry
+#define __entry field
+
+#undef TP_printk
+#define TP_printk(fmt, args...) fmt "\n", args
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field) \
+ ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_str
+#define __get_str(field) (char *)__get_dynamic_array(field)
+
+#undef __print_flags
+#define __print_flags(flag, delim, flag_array...) \
+ ({ \
+ static const struct trace_print_flags __flags[] = \
+ { flag_array, { -1, NULL }}; \
+ ftrace_print_flags_seq(p, delim, flag, __flags); \
+ })
+
+#undef __print_symbolic
+#define __print_symbolic(value, symbol_array...) \
+ ({ \
+ static const struct trace_print_flags symbols[] = \
+ { symbol_array, { -1, NULL }}; \
+ ftrace_print_symbols_seq(p, value, symbols); \
+ })
+
+#undef __print_symbolic_u64
+#if BITS_PER_LONG == 32
+#define __print_symbolic_u64(value, symbol_array...) \
+ ({ \
+ static const struct trace_print_flags_u64 symbols[] = \
+ { symbol_array, { -1, NULL } }; \
+ ftrace_print_symbols_seq_u64(p, value, symbols); \
+ })
+#else
+#define __print_symbolic_u64(value, symbol_array...) \
+ __print_symbolic(value, symbol_array)
+#endif
+
+#undef __print_hex
+#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static notrace enum print_line_t \
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
+ struct trace_event *trace_event) \
+{ \
+ struct ftrace_event_call *event; \
+ struct trace_seq *s = &iter->seq; \
+ struct ftrace_raw_##call *field; \
+ struct trace_entry *entry; \
+ struct trace_seq *p = &iter->tmp_seq; \
+ int ret; \
+ \
+ event = container_of(trace_event, struct ftrace_event_call, \
+ event); \
+ \
+ entry = iter->ent; \
+ \
+ if (entry->type != event->event.type) { \
+ WARN_ON_ONCE(1); \
+ return TRACE_TYPE_UNHANDLED; \
+ } \
+ \
+ field = (typeof(field))entry; \
+ \
+ trace_seq_init(p); \
+ ret = trace_seq_printf(s, "%s: ", event->name); \
+ if (ret) \
+ ret = trace_seq_printf(s, print); \
+ if (!ret) \
+ return TRACE_TYPE_PARTIAL_LINE; \
+ \
+ return TRACE_TYPE_HANDLED; \
+} \
+static struct trace_event_functions ftrace_event_type_funcs_##call = { \
+ .trace = ftrace_raw_output_##call, \
+};
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
+static notrace enum print_line_t \
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
+ struct trace_event *event) \
+{ \
+ struct trace_seq *s = &iter->seq; \
+ struct ftrace_raw_##template *field; \
+ struct trace_entry *entry; \
+ struct trace_seq *p = &iter->tmp_seq; \
+ int ret; \
+ \
+ entry = iter->ent; \
+ \
+ if (entry->type != event_##call.event.type) { \
+ WARN_ON_ONCE(1); \
+ return TRACE_TYPE_UNHANDLED; \
+ } \
+ \
+ field = (typeof(field))entry; \
+ \
+ trace_seq_init(p); \
+ ret = trace_seq_printf(s, "%s: ", #call); \
+ if (ret) \
+ ret = trace_seq_printf(s, print); \
+ if (!ret) \
+ return TRACE_TYPE_PARTIAL_LINE; \
+ \
+ return TRACE_TYPE_HANDLED; \
+} \
+static struct trace_event_functions ftrace_event_type_funcs_##call = { \
+ .trace = ftrace_raw_output_##call, \
+};
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type) \
+ ret = trace_define_field(event_call, #type, #item, \
+ offsetof(typeof(field), item), \
+ sizeof(field.item), \
+ is_signed_type(type), filter_type); \
+ if (ret) \
+ return ret;
+
+#undef __field
+#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
+
+#undef __array
+#define __array(type, item, len) \
+ do { \
+ mutex_lock(&event_storage_mutex); \
+ BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
+ snprintf(event_storage, sizeof(event_storage), \
+ "%s[%d]", #type, len); \
+ ret = trace_define_field(event_call, event_storage, #item, \
+ offsetof(typeof(field), item), \
+ sizeof(field.item), \
+ is_signed_type(type), FILTER_OTHER); \
+ mutex_unlock(&event_storage_mutex); \
+ if (ret) \
+ return ret; \
+ } while (0);
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
+ offsetof(typeof(field), __data_loc_##item), \
+ sizeof(field.__data_loc_##item), \
+ is_signed_type(type), FILTER_OTHER);
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
+static int notrace \
+ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
+{ \
+ struct ftrace_raw_##call field; \
+ int ret; \
+ \
+ tstruct; \
+ \
+ return ret; \
+}
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
+ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/*
+ * Remember the offset of each array from the beginning of the event.
+ */
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_ext
+#define __field_ext(type, item, filter_type)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ __data_offsets->item = __data_size + \
+ offsetof(typeof(*entry), __data); \
+ __data_offsets->item |= (len * sizeof(type)) << 16; \
+ __data_size += (len) * sizeof(type);
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static inline notrace int ftrace_get_offsets_##call( \
+ struct ftrace_data_offsets_##call *__data_offsets, proto) \
+{ \
+ int __data_size = 0; \
+ struct ftrace_raw_##call __maybe_unused *entry; \
+ \
+ tstruct; \
+ \
+ return __data_size; \
+}
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
+ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/*
+ * Stage 4 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * For those macros defined with TRACE_EVENT:
+ *
+ * static struct ftrace_event_call event_<call>;
+ *
+ * static void ftrace_raw_event_<call>(void *__data, proto)
+ * {
+ * struct ftrace_event_call *event_call = __data;
+ * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ * struct ring_buffer_event *event;
+ * struct ftrace_raw_<call> *entry; <-- defined in stage 1
+ * struct ring_buffer *buffer;
+ * unsigned long irq_flags;
+ * int __data_size;
+ * int pc;
+ *
+ * local_save_flags(irq_flags);
+ * pc = preempt_count();
+ *
+ * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
+ * event = trace_current_buffer_lock_reserve(&buffer,
+ * event_<call>->event.type,
+ * sizeof(*entry) + __data_size,
+ * irq_flags, pc);
+ * if (!event)
+ * return;
+ * entry = ring_buffer_event_data(event);
+ *
+ * { <assign>; } <-- Here we assign the entries by the __field and
+ * __array macros.
+ *
+ * if (!filter_current_check_discard(buffer, event_call, entry, event))
+ * trace_current_buffer_unlock_commit(buffer,
+ * event, irq_flags, pc);
+ * }
+ *
+ * static struct trace_event ftrace_event_type_<call> = {
+ * .trace = ftrace_raw_output_<call>, <-- stage 2
+ * };
+ *
+ * static const char print_fmt_<call>[] = <TP_printk>;
+ *
+ * static struct ftrace_event_class __used event_class_<template> = {
+ * .system = "<system>",
+ * .define_fields = ftrace_define_fields_<call>,
+ * .fields = LIST_HEAD_INIT(event_class_##call.fields),
+ * .raw_init = trace_event_raw_init,
+ * .probe = ftrace_raw_event_##call,
+ * .reg = ftrace_event_reg,
+ * };
+ *
+ * static struct ftrace_event_call event_<call> = {
+ * .name = "<call>",
+ * .class = event_class_<template>,
+ * .event = &ftrace_event_type_<call>,
+ * .print_fmt = print_fmt_<call>,
+ * };
+ * // it's only safe to use pointers when doing linker tricks to
+ * // create an array.
+ * static struct ftrace_event_call __used
+ * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
+ *
+ */
+
+#ifdef CONFIG_PERF_EVENTS
+
+#define _TRACE_PERF_PROTO(call, proto) \
+ static notrace void \
+ perf_trace_##call(void *__data, proto);
+
+#define _TRACE_PERF_INIT(call) \
+ .perf_probe = perf_trace_##call,
+
+#else
+#define _TRACE_PERF_PROTO(call, proto)
+#define _TRACE_PERF_INIT(call)
+#endif /* CONFIG_PERF_EVENTS */
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ __entry->__data_loc_##item = __data_offsets.item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __assign_str
+#define __assign_str(dst, src) \
+ strcpy(__get_str(dst), src);
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ \
+static notrace void \
+ftrace_raw_event_##call(void *__data, proto) \
+{ \
+ struct ftrace_event_call *event_call = __data; \
+ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+ struct ring_buffer_event *event; \
+ struct ftrace_raw_##call *entry; \
+ struct ring_buffer *buffer; \
+ unsigned long irq_flags; \
+ int __data_size; \
+ int pc; \
+ \
+ local_save_flags(irq_flags); \
+ pc = preempt_count(); \
+ \
+ __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+ \
+ event = trace_current_buffer_lock_reserve(&buffer, \
+ event_call->event.type, \
+ sizeof(*entry) + __data_size, \
+ irq_flags, pc); \
+ if (!event) \
+ return; \
+ entry = ring_buffer_event_data(event); \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ if (!filter_current_check_discard(buffer, event_call, entry, event)) \
+ trace_nowake_buffer_unlock_commit(buffer, \
+ event, irq_flags, pc); \
+}
+/*
+ * The ftrace_test_probe is compiled out; it is only here as a build-time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+static inline void ftrace_test_probe_##call(void) \
+{ \
+ check_trace_callback_type_##call(ftrace_raw_event_##template); \
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __get_dynamic_array
+#undef __get_str
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
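+
+/*
+ * For example, TP_printk("nid=%d", __entry->nid) in a trace header
+ * expands here (with __entry redefined to REC above) to the string
+ *
+ *	"\"nid=%d\", REC->nid"
+ *
+ * which becomes the print_fmt_<call>[] array below.
+ */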
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+_TRACE_PERF_PROTO(call, PARAMS(proto)); \
+static const char print_fmt_##call[] = print; \
+static struct ftrace_event_class __used event_class_##call = { \
+ .system = __stringify(TRACE_SYSTEM), \
+ .define_fields = ftrace_define_fields_##call, \
+ .fields = LIST_HEAD_INIT(event_class_##call.fields),\
+ .raw_init = trace_event_raw_init, \
+ .probe = ftrace_raw_event_##call, \
+ .reg = ftrace_event_reg, \
+ _TRACE_PERF_INIT(call) \
+};
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+ \
+static struct ftrace_event_call __used event_##call = { \
+ .name = #call, \
+ .class = &event_class_##template, \
+ .event.funcs = &ftrace_event_type_funcs_##template, \
+ .print_fmt = print_fmt_##template, \
+}; \
+static struct ftrace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
+ \
+static const char print_fmt_##call[] = print; \
+ \
+static struct ftrace_event_call __used event_##call = { \
+ .name = #call, \
+ .class = &event_class_##template, \
+ .event.funcs = &ftrace_event_type_funcs_##call, \
+ .print_fmt = print_fmt_##call, \
+}; \
+static struct ftrace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/*
+ * Define the insertion callback for perf events.
+ *
+ * The job is very similar to ftrace_raw_event_<call> except that we don't
+ * insert into the ring buffer but into a perf counter.
+ *
+ * static void ftrace_perf_<call>(proto)
+ * {
+ * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ * struct ftrace_event_call *event_call = &event_<call>;
+ * extern void perf_tp_event(int, u64, u64, void *, int);
+ * struct ftrace_raw_##call *entry;
+ * struct perf_trace_buf *trace_buf;
+ * u64 __addr = 0, __count = 1;
+ * unsigned long irq_flags;
+ * struct trace_entry *ent;
+ * int __entry_size;
+ * int __data_size;
+ * int __cpu;
+ * int pc;
+ *
+ * pc = preempt_count();
+ *
+ * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
+ * // Below we want to get the aligned size by taking into account
+ * // the u32 field that will later store the buffer size
+ * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
+ * sizeof(u64));
+ * __entry_size -= sizeof(u32);
+ *
+ * // Protect the non nmi buffer
+ * // This also protects the rcu read side
+ * local_irq_save(irq_flags);
+ * __cpu = smp_processor_id();
+ *
+ * if (in_nmi())
+ * trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
+ * else
+ * trace_buf = rcu_dereference_sched(perf_trace_buf);
+ *
+ * if (!trace_buf)
+ * goto end;
+ *
+ * trace_buf = per_cpu_ptr(trace_buf, __cpu);
+ *
+ * // Avoid recursion from perf that could mess up the buffer
+ * if (trace_buf->recursion++)
+ * goto end_recursion;
+ *
+ * raw_data = trace_buf->buf;
+ *
+ * // Make recursion update visible before entering perf_tp_event
+ * // so that we protect from perf recursions.
+ *
+ * barrier();
+ *
+ * // zero dead bytes from alignment to avoid stack leak to userspace:
+ * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ * entry = (struct ftrace_raw_<call> *)raw_data;
+ * ent = &entry->ent;
+ * tracing_generic_entry_update(ent, irq_flags, pc);
+ * ent->type = event_call->id;
+ *
+ * <tstruct> <- do some jobs with dynamic arrays
+ *
+ * <assign> <- affect our values
+ *
+ * perf_tp_event(event_call->id, __addr, __count, entry,
+ * __entry_size); <- submit them to perf counter
+ *
+ * }
+ */
+
+#ifdef CONFIG_PERF_EVENTS
+
+#undef __entry
+#define __entry entry
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field) \
+ ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_str
+#define __get_str(field) (char *)__get_dynamic_array(field)
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static notrace void \
+perf_trace_##call(void *__data, proto) \
+{ \
+ struct ftrace_event_call *event_call = __data; \
+ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+ struct ftrace_raw_##call *entry; \
+ struct pt_regs __regs; \
+ u64 __addr = 0, __count = 1; \
+ struct hlist_head *head; \
+ int __entry_size; \
+ int __data_size; \
+ int rctx; \
+ \
+ perf_fetch_caller_regs(&__regs); \
+ \
+ __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+ __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
+ sizeof(u64)); \
+ __entry_size -= sizeof(u32); \
+ \
+ if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
+ "profile buffer not large enough")) \
+ return; \
+ \
+ entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
+ __entry_size, event_call->event.type, &__regs, &rctx); \
+ if (!entry) \
+ return; \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ head = this_cpu_ptr(event_call->perf_events); \
+ perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
+ __count, &__regs, head); \
+}
+
+/*
+ * This part is compiled out; it is only here as a build-time check
+ * to make sure that if the tracepoint handling changes, the
+ * perf probe will fail to compile unless it too is updated.
+ */
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+static inline void perf_test_probe_##call(void) \
+{ \
+ check_trace_callback_type_##call(perf_trace_##template); \
+}
+
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
+ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_PERF_EVENTS */
+
+#undef _TRACE_PROFILE_INIT
+
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
new file mode 100644
index 00000000..31966a4f
--- /dev/null
+++ b/include/trace/syscall.h
@@ -0,0 +1,57 @@
+#ifndef _TRACE_SYSCALL_H
+#define _TRACE_SYSCALL_H
+
+#include <linux/tracepoint.h>
+#include <linux/unistd.h>
+#include <linux/ftrace_event.h>
+
+#include <asm/ptrace.h>
+
+
+/*
+ * A syscall entry in the ftrace syscalls array.
+ *
+ * @name: name of the syscall
+ * @syscall_nr: number of the syscall
+ * @nb_args: number of parameters it takes
+ * @types: list of types as strings
+ * @args: list of args as strings (args[i] matches types[i])
+ * @enter_fields: list of fields for the syscall_enter trace event
+ * @enter_event: associated syscall_enter trace event
+ * @exit_event: associated syscall_exit trace event
+ */
+struct syscall_metadata {
+ const char *name;
+ int syscall_nr;
+ int nb_args;
+ const char **types;
+ const char **args;
+ struct list_head enter_fields;
+
+ struct ftrace_event_call *enter_event;
+ struct ftrace_event_call *exit_event;
+};
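+
+/*
+ * Illustrative sketch (names and values are hypothetical; syscall_nr
+ * is resolved at boot): the SYSCALL_DEFINE machinery generates
+ * metadata roughly of this shape for each syscall:
+ *
+ *	static const char *types_sys_close[] = { "unsigned int" };
+ *	static const char *args_sys_close[]  = { "fd" };
+ *	static struct syscall_metadata __syscall_meta_sys_close = {
+ *		.name		= "sys_close",
+ *		.syscall_nr	= -1,
+ *		.nb_args	= 1,
+ *		.types		= types_sys_close,
+ *		.args		= args_sys_close,
+ *	};
+ */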
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+extern unsigned long arch_syscall_addr(int nr);
+extern int init_syscall_trace(struct ftrace_event_call *call);
+
+extern int reg_event_syscall_enter(struct ftrace_event_call *call);
+extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
+extern int reg_event_syscall_exit(struct ftrace_event_call *call);
+extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
+extern int
+ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
+enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags,
+ struct trace_event *event);
+enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags,
+ struct trace_event *event);
+#endif
+
+#ifdef CONFIG_PERF_EVENTS
+int perf_sysenter_enable(struct ftrace_event_call *call);
+void perf_sysenter_disable(struct ftrace_event_call *call);
+int perf_sysexit_enable(struct ftrace_event_call *call);
+void perf_sysexit_disable(struct ftrace_event_call *call);
+#endif
+
+#endif /* _TRACE_SYSCALL_H */