// SPDX-License-Identifier: GPL-2.0-only
/* Object lifetime handling and tracing.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include "internal.h"

static void netfs_free_request(struct work_struct *work);

/*
 * Allocate an I/O request and initialise it.
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin)
{
	static atomic_t debug_ids;
	struct inode *inode = file ? file_inode(file) : mapping->host;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct netfs_io_request *rreq;
	mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool;
	struct kmem_cache *cache = mempool->pool_data;
	int ret;

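	/* mempool_alloc() with GFP_KERNEL should sleep rather than fail, but
	 * be defensive and retry briefly if it does return NULL.
	 */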
	for (;;) {
		rreq = mempool_alloc(mempool, GFP_KERNEL);
		if (rreq)
			break;
		msleep(10);
	}

	memset(rreq, 0, kmem_cache_size(cache));
	INIT_WORK(&rreq->cleanup_work, netfs_free_request);
	rreq->start	= start;
	rreq->len	= len;
	rreq->origin	= origin;
	rreq->netfs_ops	= ctx->ops;
	rreq->mapping	= mapping;
	rreq->inode	= inode;
	rreq->i_size	= i_size_read(inode);
	rreq->debug_id	= atomic_inc_return(&debug_ids);
	rreq->wsize	= INT_MAX;
	rreq->io_streams[0].sreq_max_len = ULONG_MAX;
	rreq->io_streams[0].sreq_max_segs = 0;
	spin_lock_init(&rreq->lock);
	INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
	INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
	init_waitqueue_head(&rreq->waitq);
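	/* One ref for the caller and one to cover the time during which the
	 * request is in progress.
	 */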
	refcount_set(&rreq->ref, 2);

	if (origin == NETFS_READAHEAD ||
	    origin == NETFS_READPAGE ||
	    origin == NETFS_READ_GAPS ||
	    origin == NETFS_READ_SINGLE ||
	    origin == NETFS_READ_FOR_WRITE ||
	    origin == NETFS_UNBUFFERED_READ ||
	    origin == NETFS_DIO_READ) {
		INIT_WORK(&rreq->work, netfs_read_collection_worker);
		rreq->io_streams[0].avail = true;
	} else {
		INIT_WORK(&rreq->work, netfs_write_collection_worker);
	}

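	/* Mark the request as in progress before the filesystem's init hook
	 * can see it.
	 */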
	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	if (rreq->netfs_ops->init_request) {
		ret = rreq->netfs_ops->init_request(rreq, file);
		if (ret < 0) {
			mempool_free(rreq, mempool);
			return ERR_PTR(ret);
		}
	}

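	/* Count this request against the inode so that eviction can wait for
	 * all outstanding I/O to complete.
	 */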
	atomic_inc(&ctx->io_count);
	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), netfs_rreq_trace_new);
	netfs_proc_add_rreq(rreq);
	netfs_stat(&netfs_n_rh_rreq);
	return rreq;
}

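/*
 * Illustrative sketch only: a caller might obtain and release a request
 * roughly as follows (the trace tag and error handling here are examples,
 * not a prescribed pattern):
 *
 *	rreq = netfs_alloc_request(mapping, file, start, len, NETFS_READPAGE);
 *	if (IS_ERR(rreq))
 *		return PTR_ERR(rreq);
 *	...
 *	netfs_put_request(rreq, netfs_rreq_trace_put_return);
 */

/*
 * Get an additional ref on a request, noting the reason in the tracelog.
 */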
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
	int r;

	__refcount_inc(&rreq->ref, &r);
	trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
}

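/*
 * Unlink and put all the subrequests attached to a request's I/O streams.
 */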
void netfs_clear_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream;
	int s;

	for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) {
		stream = &rreq->io_streams[s];
		while (!list_empty(&stream->subrequests)) {
			subreq = list_first_entry(&stream->subrequests,
						  struct netfs_io_subrequest, rreq_link);
			list_del(&subreq->rreq_link);
			netfs_put_subrequest(subreq, netfs_sreq_trace_put_clear);
		}
	}
}

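/*
 * RCU callback that returns a request to its mempool once RCU-protected
 * traversals can no longer be looking at it.
 */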
static void netfs_free_request_rcu(struct rcu_head *rcu)
{
	struct netfs_io_request *rreq = container_of(rcu, struct netfs_io_request, rcu);

	mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
	netfs_stat_d(&netfs_n_rh_rreq);
}

static void netfs_free_request(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, cleanup_work);
	struct netfs_inode *ictx = netfs_inode(rreq->inode);
	unsigned int i;

	trace_netfs_rreq(rreq, netfs_rreq_trace_free);

	/* Cancel/flush the result collection worker.  It doesn't carry a ref
	 * of its own on the request, so we must make sure it has finished
	 * before the request can be freed.
	 */
	cancel_work_sync(&rreq->work);

	netfs_proc_del_rreq(rreq);
	netfs_clear_subrequests(rreq);
	if (rreq->netfs_ops->free_request)
		rreq->netfs_ops->free_request(rreq);
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	if (rreq->direct_bv) {
		if (rreq->direct_bv_unpin) {
			for (i = 0; i < rreq->direct_bv_count; i++)
				if (rreq->direct_bv[i].bv_page)
					unpin_user_page(rreq->direct_bv[i].bv_page);
		}
		kvfree(rreq->direct_bv);
	}
	rolling_buffer_clear(&rreq->buffer);

	if (atomic_dec_and_test(&ictx->io_count))
		wake_up_var(&ictx->io_count);
	call_rcu(&rreq->rcu, netfs_free_request_rcu);
}

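/*
 * Drop a ref on a request and, on the final put, punt cleanup to a
 * workqueue, as the final put may happen in a context that cannot sleep.
 */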
void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (rreq) {
		debug_id = rreq->debug_id;
		dead = __refcount_dec_and_test(&rreq->ref, &r);
		trace_netfs_rreq_ref(debug_id, r - 1, what);
		if (dead)
			WARN_ON(!queue_work(system_unbound_wq, &rreq->cleanup_work));
	}
}

/*
 * Allocate an I/O subrequest and partially initialise it.
 */
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	mempool_t *mempool = rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool;
	struct kmem_cache *cache = mempool->pool_data;

	for (;;) {
		subreq = mempool_alloc(mempool, GFP_KERNEL);
		if (subreq)
			break;
		msleep(10);
	}

	memset(subreq, 0, kmem_cache_size(cache));
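	/* No work function yet: the user of the subrequest is expected to
	 * set one before queuing it.
	 */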
	INIT_WORK(&subreq->work, NULL);
	INIT_LIST_HEAD(&subreq->rreq_link);
	refcount_set(&subreq->ref, 2);
	subreq->rreq = rreq;
	subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
	netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
	netfs_stat(&netfs_n_rh_sreq);
	return subreq;
}

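/*
 * Illustrative sketch only: a subrequest is typically linked into one of its
 * parent request's streams and put when complete (locking elided; the trace
 * tag is an example):
 *
 *	subreq = netfs_alloc_subrequest(rreq);
 *	list_add_tail(&subreq->rreq_link, &rreq->io_streams[0].subrequests);
 *	...
 *	netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
 */

/*
 * Get an additional ref on a subrequest, noting the reason in the tracelog.
 */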
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
			  enum netfs_sreq_ref_trace what)
{
	int r;

	__refcount_inc(&subreq->ref, &r);
	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
			     what);
}

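/*
 * Free a subrequest, returning it to its mempool and dropping the ref it
 * holds on its parent request.
 */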
static void netfs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;

	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
	if (rreq->netfs_ops->free_subrequest)
		rreq->netfs_ops->free_subrequest(subreq);
	mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
	netfs_stat_d(&netfs_n_rh_sreq);
	netfs_put_request(rreq, netfs_rreq_trace_put_subreq);
}

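/*
 * Drop a ref on a subrequest, freeing it if that was the last ref.  The
 * trace IDs are sampled up front as the subrequest may be freed (by us or
 * by a concurrent putter) once our ref has been dropped.
 */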
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
			  enum netfs_sreq_ref_trace what)
{
	unsigned int debug_index = subreq->debug_index;
	unsigned int debug_id = subreq->rreq->debug_id;
	bool dead;
	int r;

	dead = __refcount_dec_and_test(&subreq->ref, &r);
	trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
	if (dead)
		netfs_free_subrequest(subreq);
}