dax: add tracepoints to dax_load_hole()
author     Ross Zwisler <[email protected]>
           Mon, 8 May 2017 23:00:07 +0000 (16:00 -0700)
committer  Linus Torvalds <[email protected]>
           Tue, 9 May 2017 00:15:16 +0000 (17:15 -0700)
Add tracepoints to dax_load_hole(), following the same logging conventions
as the rest of DAX.

Here is the logging generated by a PTE read from a hole:

  read-1075  [002] ....
    62.362108: dax_pte_fault: dev 259:0 ino 0x1003 shared ALLOW_RETRY|KILLABLE|USER address 0x10480000 pgoff 0x280

  read-1075  [002] ....
    62.362140: dax_load_hole: dev 259:0 ino 0x1003 shared ALLOW_RETRY|KILLABLE|USER address 0x10480000 pgoff 0x280 NOPAGE

  read-1075  [002] ....
    62.362141: dax_pte_fault_done: dev 259:0 ino 0x1003 shared ALLOW_RETRY|KILLABLE|USER address 0x10480000 pgoff 0x280 NOPAGE
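
The excerpt above was captured via ftrace.  A minimal userspace sketch
along these lines can reproduce it, assuming a DAX-capable filesystem
mounted at /mnt/dax and tracefs exposed at /sys/kernel/debug/tracing;
both paths and the test file name are illustrative, not part of this
patch.  A read access through a shared mapping of an unwritten range
takes a PTE read fault, which is the path that reaches dax_load_hole():

  /* Illustrative only -- run as root so the trace event can be enabled. */
  #include <fcntl.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
  #include <sys/mman.h>
  #include <unistd.h>

  static void write_str(const char *path, const char *val)
  {
  	int fd = open(path, O_WRONLY);

  	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
  		perror(path);
  		exit(EXIT_FAILURE);
  	}
  	close(fd);
  }

  int main(void)
  {
  	const size_t len = 1 << 20;
  	volatile char c;
  	char *addr;
  	int fd;

  	/* Enable the new event (assumed tracefs path). */
  	write_str("/sys/kernel/debug/tracing/events/fs_dax/dax_load_hole/enable",
  		  "1");

  	/* Sparse file on the DAX mount: the mapped range is all hole. */
  	fd = open("/mnt/dax/hole-test", O_CREAT | O_RDWR, 0644);
  	if (fd < 0 || ftruncate(fd, len) < 0) {
  		perror("/mnt/dax/hole-test");
  		return EXIT_FAILURE;
  	}

  	addr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
  	if (addr == MAP_FAILED) {
  		perror("mmap");
  		return EXIT_FAILURE;
  	}

  	/* PTE read fault from the hole -> dax_load_hole(). */
  	c = *(volatile char *)addr;
  	(void)c;

  	munmap(addr, len);
  	close(fd);
  	return EXIT_SUCCESS;
  }

The resulting events can then be read back from
/sys/kernel/debug/tracing/trace, formatted as in the excerpt above.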

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ross Zwisler <[email protected]>
Reviewed-by: Jan Kara <[email protected]>
Cc: Alexander Viro <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Steven Rostedt <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
fs/dax.c
include/trace/events/fs_dax.h

index d10524ab7e5548ac13402ae93b7c37b16df6c615..36fafff2e82fa54aa87e8304bcd6e6c17e42fe56 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -509,21 +509,25 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 static int dax_load_hole(struct address_space *mapping, void **entry,
                         struct vm_fault *vmf)
 {
+       struct inode *inode = mapping->host;
        struct page *page;
        int ret;
 
        /* Hole page already exists? Return it...  */
        if (!radix_tree_exceptional_entry(*entry)) {
                page = *entry;
-               goto out;
+               goto finish_fault;
        }
 
        /* This will replace locked radix tree entry with a hole page */
        page = find_or_create_page(mapping, vmf->pgoff,
                                   vmf->gfp_mask | __GFP_ZERO);
-       if (!page)
-               return VM_FAULT_OOM;
- out:
+       if (!page) {
+               ret = VM_FAULT_OOM;
+               goto out;
+       }
+
+finish_fault:
        vmf->page = page;
        ret = finish_fault(vmf);
        vmf->page = NULL;
@@ -531,8 +535,10 @@ static int dax_load_hole(struct address_space *mapping, void **entry,
        if (!ret) {
                /* Grab reference for PTE that is now referencing the page */
                get_page(page);
-               return VM_FAULT_NOPAGE;
+               ret = VM_FAULT_NOPAGE;
        }
+out:
+       trace_dax_load_hole(inode, vmf, ret);
        return ret;
 }
 
index b5a520961f8d9958657ae872ab13b713a5a02f57..2f15dfea7fb17d8e2ce5368e1372de5d6741ccdd 100644
--- a/include/trace/events/fs_dax.h
+++ b/include/trace/events/fs_dax.h
@@ -192,6 +192,7 @@ DEFINE_PTE_FAULT_EVENT(dax_pte_fault);
 DEFINE_PTE_FAULT_EVENT(dax_pte_fault_done);
 DEFINE_PTE_FAULT_EVENT(dax_pfn_mkwrite_no_entry);
 DEFINE_PTE_FAULT_EVENT(dax_pfn_mkwrite);
+DEFINE_PTE_FAULT_EVENT(dax_load_hole);
 
 #endif /* _TRACE_FS_DAX_H */
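
For context, DEFINE_PTE_FAULT_EVENT() is a thin wrapper around
DEFINE_EVENT(): the new dax_load_hole event reuses the existing PTE-fault
event class, which is why its output carries the same dev/ino/address/pgoff
fields as dax_pte_fault in the excerpt above.  A rough sketch of the pattern
follows; the class name is an assumption based on the surrounding header,
and the prototype is taken from the trace_dax_load_hole(inode, vmf, ret)
call added in fs/dax.c:

  /* Sketch of the wrapper; the real header also defines the event class
   * with its TP_STRUCT__entry fields and TP_printk format. */
  #define DEFINE_PTE_FAULT_EVENT(name) \
  DEFINE_EVENT(dax_pte_fault_class, name, \
  	TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result), \
  	TP_ARGS(inode, vmf, result))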