// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/cpuhotplug.h>
#include <linux/vmalloc.h>
#include <linux/sysfs.h>

#include "zcomp.h"

#include "backend_lzo.h"
#include "backend_lzorle.h"
#include "backend_lz4.h"
#include "backend_lz4hc.h"
#include "backend_zstd.h"
#include "backend_deflate.h"
#include "backend_842.h"

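/*
 * NULL-terminated list of compiled-in backends. zcomp_available_show()
 * prints the algorithms in this order.
 */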
static const struct zcomp_ops *backends[] = {
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZO)
	&backend_lzorle,
	&backend_lzo,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4)
	&backend_lz4,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_LZ4HC)
	&backend_lz4hc,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_ZSTD)
	&backend_zstd,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_DEFLATE)
	&backend_deflate,
#endif
#if IS_ENABLED(CONFIG_ZRAM_BACKEND_842)
	&backend_842,
#endif
	NULL
};

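/*
 * Free a stream's compression context and buffers. ->buffer is set to
 * NULL so that zcomp_stream_get() can tell that the stream has been
 * destroyed.
 */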
static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	comp->ops->destroy_ctx(&zstrm->ctx);
	vfree(zstrm->local_copy);
	vfree(zstrm->buffer);
	zstrm->buffer = NULL;
}

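/*
 * Allocate a stream's compression context and working buffers. On
 * failure everything allocated so far is freed and -ENOMEM (or the
 * backend's create_ctx() error) is returned.
 */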
static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	int ret;

	ret = comp->ops->create_ctx(comp->params, &zstrm->ctx);
	if (ret)
		return ret;

	zstrm->local_copy = vzalloc(PAGE_SIZE);
	/*
	 * Allocate two pages: one for the compressed data, plus one
	 * extra for the case when the compressed size is larger than
	 * the original one.
	 */
	zstrm->buffer = vzalloc(2 * PAGE_SIZE);
	if (!zstrm->buffer || !zstrm->local_copy) {
		zcomp_strm_free(comp, zstrm);
		return -ENOMEM;
	}
	return 0;
}

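/*
 * Walk the backends list looking for @comp; the NULL sentinel
 * guarantees the loop terminates and NULL is returned when no
 * backend matches.
 */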
static const struct zcomp_ops *lookup_backend_ops(const char *comp)
{
	int i = 0;

	while (backends[i]) {
		if (sysfs_streq(comp, backends[i]->name))
			break;
		i++;
	}
	return backends[i];
}

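/* true if @comp names a compiled-in compression backend */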
bool zcomp_available_algorithm(const char *comp)
{
	return lookup_backend_ops(comp) != NULL;
}

/* show available compressors */
ssize_t zcomp_available_show(const char *comp, char *buf, ssize_t at)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) {
		if (!strcmp(comp, backends[i]->name)) {
			at += sysfs_emit_at(buf, at, "[%s] ",
					    backends[i]->name);
		} else {
			at += sysfs_emit_at(buf, at, "%s ", backends[i]->name);
		}
	}

	at += sysfs_emit_at(buf, at, "\n");
	return at;
}

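/*
 * Return the current CPU's stream with ->lock held; the caller must
 * release it with zcomp_stream_put(). May sleep.
 */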
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
	for (;;) {
		struct zcomp_strm *zstrm = raw_cpu_ptr(comp->stream);

		/*
		 * Inspired by zswap
		 *
		 * The stream is returned with ->lock held, which prevents
		 * cpu_dead() from releasing it under us. However, there
		 * is still a race window between raw_cpu_ptr() and
		 * mutex_lock(), during which we could have been migrated
		 * from a CPU that has already destroyed its stream. If
		 * so, unlock and retry on the current CPU.
		 */
		mutex_lock(&zstrm->lock);
		if (likely(zstrm->buffer))
			return zstrm;
		mutex_unlock(&zstrm->lock);
	}
}

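/* Release a stream obtained from zcomp_stream_get() */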
void zcomp_stream_put(struct zcomp_strm *zstrm)
{
	mutex_unlock(&zstrm->lock);
}

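/*
 * Compress one PAGE_SIZE source page into zstrm->buffer. On success 0
 * is returned and the compressed length is stored in @dst_len;
 * otherwise the backend's error code is returned.
 */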
int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
		   const void *src, unsigned int *dst_len)
{
	struct zcomp_req req = {
		.src = src,
		.dst = zstrm->buffer,
		.src_len = PAGE_SIZE,
		.dst_len = 2 * PAGE_SIZE,
	};
	int ret;

	might_sleep();
	ret = comp->ops->compress(comp->params, &zstrm->ctx, &req);
	if (!ret)
		*dst_len = req.dst_len;
	return ret;
}

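/*
 * Decompress @src_len bytes at @src into the PAGE_SIZE buffer @dst.
 * Returns 0 on success or the backend's error code.
 */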
int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
		     const void *src, unsigned int src_len, void *dst)
{
	struct zcomp_req req = {
		.src = src,
		.dst = dst,
		.src_len = src_len,
		.dst_len = PAGE_SIZE,
	};

	might_sleep();
	return comp->ops->decompress(comp->params, &zstrm->ctx, &req);
}

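/* CPU hotplug (CPUHP_ZCOMP_PREPARE) callback: set up the CPU's stream */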
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
	int ret;

	ret = zcomp_strm_init(comp, zstrm);
	if (ret)
		pr_err("Can't allocate a compression stream\n");
	return ret;
}

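/*
 * CPU hotplug teardown callback: free the CPU's stream. ->lock is
 * taken to synchronize against a zcomp_stream_get() that may still
 * be using the stream.
 */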
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);

	mutex_lock(&zstrm->lock);
	zcomp_strm_free(comp, zstrm);
	mutex_unlock(&zstrm->lock);
	return 0;
}

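/*
 * Allocate the per-CPU streams, set up the backend parameters and
 * register with CPU hotplug, which calls zcomp_cpu_up_prepare() for
 * every online CPU.
 */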
static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
{
	int ret, cpu;

	comp->stream = alloc_percpu(struct zcomp_strm);
	if (!comp->stream)
		return -ENOMEM;

	comp->params = params;
	ret = comp->ops->setup_params(comp->params);
	if (ret)
		goto cleanup;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu_ptr(comp->stream, cpu)->lock);

	ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	if (ret < 0)
		goto cleanup;

	return 0;

cleanup:
	comp->ops->release_params(comp->params);
	free_percpu(comp->stream);
	return ret;
}

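/* Tear down everything set up by zcomp_create() and free @comp */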
void zcomp_destroy(struct zcomp *comp)
{
	cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	comp->ops->release_params(comp->params);
	free_percpu(comp->stream);
	kfree(comp);
}

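/*
 * Create and initialize a zcomp for algorithm @alg. Returns an
 * ERR_PTR() on failure: -EINVAL if @alg does not name a compiled-in
 * backend, -ENOMEM or a backend/hotplug error otherwise.
 *
 * A minimal usage sketch (error handling elided, caller-provided
 * params/src/dst_len assumed):
 *
 *	comp = zcomp_create("lzo", &params);
 *	zstrm = zcomp_stream_get(comp);
 *	ret = zcomp_compress(comp, zstrm, src, &dst_len);
 *	zcomp_stream_put(zstrm);
 *	zcomp_destroy(comp);
 */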
struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params)
{
	struct zcomp *comp;
	int error;

	/*
	 * The backends array has a sentinel NULL value, so the minimum
	 * size is 1. To be valid, the array must, apart from the
	 * sentinel NULL element, contain at least one compression
	 * backend.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(backends) <= 1);

	comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	comp->ops = lookup_backend_ops(alg);
	if (!comp->ops) {
		kfree(comp);
		return ERR_PTR(-EINVAL);
	}

	error = zcomp_init(comp, params);
	if (error) {
		kfree(comp);
		return ERR_PTR(error);
	}
	return comp;
}