// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/cpuhotplug.h>
#include <linux/vmalloc.h>

#include "zcomp.h"

#include "backend_lzo.h"
#include "backend_lzorle.h"
#include "backend_lz4.h"
#include "backend_lz4hc.h"
#include "backend_zstd.h"
#include "backend_deflate.h"
#include "backend_842.h"

static const struct zcomp_ops *backends[] = {
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZO)
	&backend_lzorle,
	&backend_lzo,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4)
	&backend_lz4,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4HC)
	&backend_lz4hc,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_ZSTD)
	&backend_zstd,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_DEFLATE)
	&backend_deflate,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_842)
	&backend_842,
#endif
	NULL
};
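
/*
 * Every entry above implements struct zcomp_ops (see zcomp.h for the
 * authoritative definition). A sketch of the members this file relies
 * on, with signatures inferred from the call sites below:
 *
 *	const char *name;
 *	int  (*setup_params)(struct zcomp_params *params);
 *	void (*release_params)(struct zcomp_params *params);
 *	int  (*create_ctx)(struct zcomp_params *params, struct zcomp_ctx *ctx);
 *	void (*destroy_ctx)(struct zcomp_ctx *ctx);
 *	int  (*compress)(struct zcomp_params *params, struct zcomp_ctx *ctx,
 *			 struct zcomp_req *req);
 *	int  (*decompress)(struct zcomp_params *params, struct zcomp_ctx *ctx,
 *			   struct zcomp_req *req);
 */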

static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	comp->ops->destroy_ctx(&zstrm->ctx);
	vfree(zstrm->local_copy);
	vfree(zstrm->buffer);
	zstrm->local_copy = NULL;
	zstrm->buffer = NULL;
}

static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	int ret;

	ret = comp->ops->create_ctx(comp->params, &zstrm->ctx);
	if (ret)
		return ret;

	zstrm->local_copy = vzalloc(PAGE_SIZE);
	/*
	 * allocate 2 pages: 1 for compressed data, plus 1 extra for the
	 * case when the compressed size is larger than the original one
	 */
	zstrm->buffer = vzalloc(2 * PAGE_SIZE);
	if (!zstrm->buffer || !zstrm->local_copy) {
		zcomp_strm_free(comp, zstrm);
		return -ENOMEM;
	}
	return 0;
}

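/*
 * Walk the NULL-terminated backends[] table and return the matching
 * ops, or the NULL sentinel itself when @comp names no built-in
 * backend.
 */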
static const struct zcomp_ops *lookup_backend_ops(const char *comp)
{
	int i = 0;

	while (backends[i]) {
		if (sysfs_streq(comp, backends[i]->name))
			break;
		i++;
	}
	return backends[i];
}

bool zcomp_available_algorithm(const char *comp)
{
	return lookup_backend_ops(comp) != NULL;
}

/* Show available compressors; the currently selected one is bracketed. */
ssize_t zcomp_available_show(const char *comp, char *buf)
{
	ssize_t sz = 0;
	int i;

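	/* -2: keep 2 bytes free so the trailing newline (plus NUL) always fits */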
	for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) {
		if (!strcmp(comp, backends[i]->name)) {
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"[%s] ", backends[i]->name);
		} else {
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"%s ", backends[i]->name);
		}
	}

	sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
	return sz;
}

struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
	for (;;) {
		struct zcomp_strm *zstrm = raw_cpu_ptr(comp->stream);

		/*
		 * Inspired by zswap
		 *
		 * stream is returned with ->lock held, which prevents
		 * cpu_dead() from releasing this stream under us; however,
		 * there is still a race window between raw_cpu_ptr() and
		 * mutex_lock(), during which we could have been migrated
		 * from a CPU that has already destroyed its stream.  If
		 * so then unlock and re-try on the current CPU.
		 */
		mutex_lock(&zstrm->lock);
		if (likely(zstrm->buffer))
			return zstrm;
		mutex_unlock(&zstrm->lock);
	}
}

void zcomp_stream_put(struct zcomp_strm *zstrm)
{
	mutex_unlock(&zstrm->lock);
}
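
/*
 * Typical usage (a minimal sketch of how a caller such as zram's write
 * path is expected to drive this API; names and error handling are
 * illustrative, not taken from this file):
 *
 *	zstrm = zcomp_stream_get(comp);
 *	ret = zcomp_compress(comp, zstrm, src, &comp_len);
 *	zcomp_stream_put(zstrm);
 *
 * The stream comes back with ->lock held, so it is exclusively owned
 * between get and put.
 */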

int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
		   const void *src, unsigned int *dst_len)
{
	struct zcomp_req req = {
		.src = src,
		.dst = zstrm->buffer,
		.src_len = PAGE_SIZE,
		.dst_len = 2 * PAGE_SIZE,
	};
	int ret;

	might_sleep();
	ret = comp->ops->compress(comp->params, &zstrm->ctx, &req);
	if (!ret)
		*dst_len = req.dst_len;
	return ret;
}
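
/*
 * Note: req.dst_len is 2 * PAGE_SIZE, so a backend may legitimately
 * "compress" a page into something larger than PAGE_SIZE. What to do
 * with such a result is the caller's decision; zram, for instance,
 * falls back to storing such pages uncompressed.
 */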

int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
		     const void *src, unsigned int src_len, void *dst)
{
	struct zcomp_req req = {
		.src = src,
		.dst = dst,
		.src_len = src_len,
		.dst_len = PAGE_SIZE,
	};

	might_sleep();
	return comp->ops->decompress(comp->params, &zstrm->ctx, &req);
}

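/*
 * CPU hotplug callbacks, registered as a multi-instance cpuhp state
 * (CPUHP_ZCOMP_PREPARE): up_prepare allocates the per-CPU stream, and
 * cpu_dead frees it under ->lock, so that a racing zcomp_stream_get()
 * observes the NULLed ->buffer and retries on another CPU.
 */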
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
	int ret;

	ret = zcomp_strm_init(comp, zstrm);
	if (ret)
		pr_err("Can't allocate a compression stream\n");
	return ret;
}

int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);

	mutex_lock(&zstrm->lock);
	zcomp_strm_free(comp, zstrm);
	mutex_unlock(&zstrm->lock);
	return 0;
}

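/*
 * Allocate the per-CPU streams and hook this zcomp instance into the
 * CPUHP_ZCOMP_PREPARE state. cpuhp_state_add_instance() runs
 * zcomp_cpu_up_prepare() on every online CPU and unwinds the already
 * initialized CPUs if any of them fails.
 */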
static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
{
	int ret, cpu;

	comp->stream = alloc_percpu(struct zcomp_strm);
	if (!comp->stream)
		return -ENOMEM;

	comp->params = params;
	ret = comp->ops->setup_params(comp->params);
	if (ret)
		goto cleanup;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu_ptr(comp->stream, cpu)->lock);

	ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	if (ret < 0)
		goto cleanup;

	return 0;

cleanup:
	comp->ops->release_params(comp->params);
	free_percpu(comp->stream);
	return ret;
}

void zcomp_destroy(struct zcomp *comp)
{
	cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	comp->ops->release_params(comp->params);
	free_percpu(comp->stream);
	kfree(comp);
}

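/*
 * Look up @alg among the built-in backends and return a fully
 * initialized zcomp for it. A minimal sketch of the intended lifecycle
 * (caller names are illustrative; zram registers the cpuhp state once
 * at module init):
 *
 *	cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE,
 *				"block/zram/comp:prepare",
 *				zcomp_cpu_up_prepare, zcomp_cpu_dead);
 *	...
 *	comp = zcomp_create("zstd", &params);
 *	if (IS_ERR(comp))
 *		return PTR_ERR(comp);
 *	...
 *	zcomp_destroy(comp);
 */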
struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params)
{
	struct zcomp *comp;
	int error;

	/*
	 * The backends array has a sentinel NULL value, so the minimum
	 * size is 1. To be valid, the array must, apart from the
	 * sentinel NULL element, contain at least one compression
	 * backend.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(backends) <= 1);

	comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	comp->ops = lookup_backend_ops(alg);
	if (!comp->ops) {
		kfree(comp);
		return ERR_PTR(-EINVAL);
	}

	error = zcomp_init(comp, params);
	if (error) {
		kfree(comp);
		return ERR_PTR(error);
	}
	return comp;
}