// SPDX-License-Identifier: GPL-2.0-only
/* Object lifetime handling and tracing.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include "internal.h"

static void netfs_free_request(struct work_struct *work);

/*
 * Allocate an I/O request and initialise it.
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin)
{
	static atomic_t debug_ids;
	struct inode *inode = file ? file_inode(file) : mapping->host;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct netfs_io_request *rreq;
	mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool;
	struct kmem_cache *cache = mempool->pool_data;
	int ret;

	for (;;) {
		rreq = mempool_alloc(mempool, GFP_KERNEL);
		if (rreq)
			break;
		msleep(10);
	}

	memset(rreq, 0, kmem_cache_size(cache));
	INIT_WORK(&rreq->cleanup_work, netfs_free_request);
	rreq->start	= start;
	rreq->len	= len;
	rreq->origin	= origin;
	rreq->netfs_ops	= ctx->ops;
	rreq->mapping	= mapping;
	rreq->inode	= inode;
	rreq->i_size	= i_size_read(inode);
	rreq->debug_id	= atomic_inc_return(&debug_ids);
	rreq->wsize	= INT_MAX;
	rreq->io_streams[0].sreq_max_len = ULONG_MAX;
	rreq->io_streams[0].sreq_max_segs = 0;
	spin_lock_init(&rreq->lock);
	INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
	INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
	init_waitqueue_head(&rreq->waitq);
	refcount_set(&rreq->ref, 2);

	if (origin == NETFS_READAHEAD ||
	    origin == NETFS_READPAGE ||
	    origin == NETFS_READ_GAPS ||
	    origin == NETFS_READ_SINGLE ||
	    origin == NETFS_READ_FOR_WRITE ||
	    origin == NETFS_DIO_READ) {
		INIT_WORK(&rreq->work, netfs_read_collection_worker);
		rreq->io_streams[0].avail = true;
	} else {
		INIT_WORK(&rreq->work, netfs_write_collection_worker);
	}

	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	if (rreq->netfs_ops->init_request) {
		ret = rreq->netfs_ops->init_request(rreq, file);
		if (ret < 0) {
			mempool_free(rreq, mempool);
			return ERR_PTR(ret);
		}
	}

	atomic_inc(&ctx->io_count);
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), netfs_rreq_trace_new);
	netfs_proc_add_rreq(rreq);
	netfs_stat(&netfs_n_rh_rreq);
	return rreq;
}
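
/*
 * Illustrative sketch (the caller context is assumed, not taken from this
 * file): the allocation is paired with a final netfs_put_request(), passing
 * whichever netfs_rreq_ref_trace value fits the call site.  Note that the
 * request is returned with two references held (see refcount_set() above).
 *
 *	rreq = netfs_alloc_request(mapping, file, start, len, NETFS_READAHEAD);
 *	if (IS_ERR(rreq))
 *		return PTR_ERR(rreq);
 *	...
 *	netfs_put_request(rreq, what);
 */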
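
/*
 * Get an extra reference on a request and log the new count in the tracelog.
 */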
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
	int r;

	__refcount_inc(&rreq->ref, &r);
	trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
}

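/*
 * Unhook and put all of the subrequests attached to each of a request's I/O
 * streams.
 */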
void netfs_clear_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream;
	int s;

	for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) {
		stream = &rreq->io_streams[s];
		while (!list_empty(&stream->subrequests)) {
			subreq = list_first_entry(&stream->subrequests,
						  struct netfs_io_subrequest, rreq_link);
			list_del(&subreq->rreq_link);
			netfs_put_subrequest(subreq, netfs_sreq_trace_put_clear);
		}
	}
}

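/*
 * RCU callback that returns a request to its mempool after a grace period.
 */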
static void netfs_free_request_rcu(struct rcu_head *rcu)
{
	struct netfs_io_request *rreq = container_of(rcu, struct netfs_io_request, rcu);

	mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
	netfs_stat_d(&netfs_n_rh_rreq);
}

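/*
 * Work item that cleans up a request once the last reference is gone: it
 * flushes the collection worker, detaches the subrequests, lets the
 * filesystem and the cache clean up, unpins any pinned user pages and then
 * frees the request via RCU.
 */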
static void netfs_free_request(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, cleanup_work);
	struct netfs_inode *ictx = netfs_inode(rreq->inode);
	unsigned int i;

	trace_netfs_rreq(rreq, netfs_rreq_trace_free);

	/* Cancel/flush the result collection worker.  That does not carry a
	 * ref of its own, so we must wait for it somewhere.
	 */
	cancel_work_sync(&rreq->work);

	netfs_proc_del_rreq(rreq);
	netfs_clear_subrequests(rreq);
	if (rreq->netfs_ops->free_request)
		rreq->netfs_ops->free_request(rreq);
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	if (rreq->direct_bv) {
		for (i = 0; i < rreq->direct_bv_count; i++) {
			if (rreq->direct_bv[i].bv_page) {
				if (rreq->direct_bv_unpin)
					unpin_user_page(rreq->direct_bv[i].bv_page);
			}
		}
		kvfree(rreq->direct_bv);
	}
	rolling_buffer_clear(&rreq->buffer);

	if (atomic_dec_and_test(&ictx->io_count))
		wake_up_var(&ictx->io_count);
	call_rcu(&rreq->rcu, netfs_free_request_rcu);
}

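/*
 * Drop a reference on a request.  The final put punts the actual clean-up to
 * a workqueue because netfs_free_request() waits for the collection worker
 * and so must run in a context that can sleep.
 */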
void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (rreq) {
		debug_id = rreq->debug_id;
		dead = __refcount_dec_and_test(&rreq->ref, &r);
		trace_netfs_rreq_ref(debug_id, r - 1, what);
		if (dead)
			WARN_ON(!queue_work(system_unbound_wq, &rreq->cleanup_work));
	}
}

/*
 * Allocate and partially initialise an I/O subrequest structure.
 */
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	mempool_t *mempool = rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool;
	struct kmem_cache *cache = mempool->pool_data;

	for (;;) {
		subreq = mempool_alloc(mempool, GFP_KERNEL);
		if (subreq)
			break;
		msleep(10);
	}

	memset(subreq, 0, kmem_cache_size(cache));
	INIT_WORK(&subreq->work, NULL);
	INIT_LIST_HEAD(&subreq->rreq_link);
	refcount_set(&subreq->ref, 2);
	subreq->rreq = rreq;
	subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
	netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
	netfs_stat(&netfs_n_rh_sreq);
	return subreq;
}
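
/*
 * Illustrative sketch (caller context assumed, not taken from this file): a
 * subrequest is allocated against its parent request, which it pins with a
 * ref, and is released with netfs_put_subrequest():
 *
 *	subreq = netfs_alloc_subrequest(rreq);
 *	...
 *	netfs_put_subrequest(subreq, netfs_sreq_trace_put_clear);
 *
 * (netfs_sreq_trace_put_clear is simply the trace value used earlier in this
 * file; callers pass whichever netfs_sreq_ref_trace value fits.)
 */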
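
/*
 * Get an extra reference on a subrequest and log the new count in the
 * tracelog.
 */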
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
			  enum netfs_sreq_ref_trace what)
{
	int r;

	__refcount_inc(&subreq->ref, &r);
	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
			     what);
}

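/*
 * Clean up a subrequest: give the filesystem a chance to release its
 * resources, return the subrequest to its mempool and drop the reference it
 * holds on its parent request.
 */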
static void netfs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;

	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
	if (rreq->netfs_ops->free_subrequest)
		rreq->netfs_ops->free_subrequest(subreq);
	mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
	netfs_stat_d(&netfs_n_rh_sreq);
	netfs_put_request(rreq, netfs_rreq_trace_put_subreq);
}

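/*
 * Drop a reference on a subrequest, freeing it when the count reaches zero.
 */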
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
			  enum netfs_sreq_ref_trace what)
{
	unsigned int debug_index = subreq->debug_index;
	unsigned int debug_id = subreq->rreq->debug_id;
	bool dead;
	int r;

	dead = __refcount_dec_and_test(&subreq->ref, &r);
	trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
	if (dead)
		netfs_free_subrequest(subreq);
}