xref: /linux/fs/dax.c (revision fc284d631894d8673d229fad92762b66c9875cab)
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>

/*
 * dax_clear_blocks() is called from within transaction context from XFS,
 * and hence the stack from this point must follow GFP_NOFS semantics for
 * all operations.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void __pmem *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			clear_pmem(addr, pgsz);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
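
/*
 * Example (illustrative sketch only, not part of this file): a filesystem
 * that has just allocated blocks for a DAX file might zero them before
 * exposing them to userspace.  The helper name "example_alloc" and its
 * callers are hypothetical:
 *
 *	static int example_alloc(struct inode *inode, sector_t block,
 *			long nr_blocks)
 *	{
 *		long size = nr_blocks << inode->i_blkbits;
 *
 *		return dax_clear_blocks(inode, block, size);
 *	}
 */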

static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
		unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}

/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* One past the last byte written */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}
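
/*
 * Illustrative sketch (hypothetical caller, not part of this file) of the
 * convention described above: zero b_state before calling get_block, and
 * only trust b_size if get_block set at least one state bit:
 *
 *	bh->b_state = 0;
 *	bh->b_size = PAGE_ALIGN(end - pos);
 *	err = get_block(inode, block, bh, 0);
 *	if (!err && !buffer_size_valid(bh))
 *		bh->b_size = 1 << inode->i_blkbits;	// assume one block
 */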

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void __pmem *addr;
	bool hole = false;
	bool need_wmb = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						   iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(addr, retval, first, pos,
									end);
					need_wmb = true;
				}
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *)addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			retval = -EFAULT;
			break;
		}

		pos += len;
		addr += len;
	}

	if (need_wmb)
		wmb_pmem();

	return (pos == start) ? retval : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
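
/*
 * Example (illustrative sketch only): a filesystem's ->direct_IO method
 * might wrap dax_do_io() like this.  "example_get_block" stands in for the
 * filesystem's real get_block_t callback and is hypothetical:
 *
 *	static ssize_t example_direct_IO(struct kiocb *iocb,
 *			struct iov_iter *iter, loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		return dax_do_io(iocb, inode, iter, offset,
 *				 example_get_block, NULL, DIO_LOCKING);
 *	}
 */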

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void __pmem *vfrom;
	void *vto;

	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct address_space *mapping = inode->i_mapping;
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void __pmem *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(addr, PAGE_SIZE);
		wmb_pmem();
	}

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, then it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);
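
/*
 * Example (illustrative sketch only): a filesystem that manages its own
 * fault-vs-truncate locking can call __dax_fault() directly with that lock
 * held.  The lock and helper names here ("example_mmap_lock",
 * "example_get_block") are hypothetical:
 *
 *	static int example_locked_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		int ret;
 *
 *		down_read(&example_mmap_lock);
 *		ret = __dax_fault(vma, vmf, example_get_block, NULL);
 *		up_read(&example_mmap_lock);
 *
 *		return ret;
 *	}
 */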

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written (see __dax_fault() for details); may be NULL if the fs
 *	does not support unwritten extents
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
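
/*
 * Example (illustrative sketch only): a filesystem with no extra locking
 * requirements can use dax_fault() almost directly as its ->fault handler.
 * "example_get_block" is a hypothetical get_block_t callback:
 *
 *	static int example_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, example_get_block, NULL);
 *	}
 *
 * See the vm_operations_struct sketch after dax_pfn_mkwrite() below for how
 * this handler is wired up.
 */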

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	long length;
	void __pmem *kaddr;
	pgoff_t size, pgoff;
	sector_t block, sector;
	unsigned long pfn;
	int result = 0;

	/* dax pmd mappings are broken wrt gup and fork */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		return VM_FAULT_FALLBACK;
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		return VM_FAULT_FALLBACK;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size)
		return VM_FAULT_FALLBACK;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	length = get_block(inode, block, &bh, write);
	if (length)
		return VM_FAULT_SIGBUS;
	i_mmap_lock_read(mapping);

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_read(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_read(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size)
		goto fallback;

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page))
			goto fallback;

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			goto fallback;
		}

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		sector = bh.b_blocknr << (blkbits - 9);
		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		/*
		 * TODO: teach vmf_insert_pfn_pmd() to support
		 * 'pte_special' for pmds
		 */
		if (pfn_valid(pfn))
			goto fallback;

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			int i;
			for (i = 0; i < PTRS_PER_PMD; i++)
				clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}

		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The faulting virtual address
 * @pmd: The PMD entry in the page table to install the mapping into
 * @flags: The fault flags (FAULT_FLAG_*)
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written (see __dax_fault() for details)
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
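
/*
 * Example (illustrative sketch only): the corresponding ->pmd_fault wrapper
 * for the hypothetical filesystem sketched above:
 *
 *	static int example_pmd_fault(struct vm_area_struct *vma,
 *			unsigned long addr, pmd_t *pmd, unsigned int flags)
 *	{
 *		return dax_pmd_fault(vma, addr, pmd, flags,
 *				     example_get_block, NULL);
 *	}
 */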
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
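
/*
 * Example (illustrative sketch only): tying the fault handlers above into a
 * vm_operations_struct and a ->mmap method.  All "example_*" names are
 * hypothetical:
 *
 *	static const struct vm_operations_struct example_dax_vm_ops = {
 *		.fault		= example_fault,
 *		.pmd_fault	= example_pmd_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 *
 *	static int example_file_mmap(struct file *file,
 *			struct vm_area_struct *vma)
 *	{
 *		file_accessed(file);
 *		vma->vm_ops = &example_dax_vm_ops;
 *		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
 *		return 0;
 *	}
 */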

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file in which the range is being zeroed
 * @from: The file offset where zeroing starts
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void __pmem *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		clear_pmem(addr + offset, length);
		wmb_pmem();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
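
/*
 * Example (illustrative sketch only): zeroing the partial pages at either
 * end of a hole punch, one page-bounded call per end.  "example_get_block"
 * is hypothetical; round_up()/round_down() are from <linux/kernel.h>:
 *
 *	loff_t first_page = round_up(start, PAGE_CACHE_SIZE);
 *	loff_t last_page = round_down(start + len, PAGE_CACHE_SIZE);
 *	int err = 0;
 *
 *	if (first_page > start)
 *		err = dax_zero_page_range(inode, start, first_page - start,
 *					  example_get_block);
 *	if (!err && last_page < start + len)
 *		err = dax_zero_page_range(inode, last_page,
 *					  start + len - last_page,
 *					  example_get_block);
 */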

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
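
/*
 * Example (illustrative sketch only): a filesystem's truncate path might
 * zero the new partial tail page before updating i_size.  "example_get_block"
 * is hypothetical:
 *
 *	if (IS_DAX(inode))
 *		error = dax_truncate_page(inode, newsize, example_get_block);
 *	else
 *		error = block_truncate_page(inode->i_mapping, newsize,
 *					    example_get_block);
 *	if (error)
 *		return error;
 *	truncate_setsize(inode, newsize);
 */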