// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for IBM Power 842 compression accelerator
 *
 * Copyright (C) IBM Corporation, 2012
 *
 * Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
 *          Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#include <asm/vio.h>

#include "nx-842.h"
#include "nx_csbcpb.h" /* struct nx_csbcpb */

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
MODULE_ALIAS_CRYPTO("842");
MODULE_ALIAS_CRYPTO("842-nx");

static struct nx842_constraints nx842_pseries_constraints = {
	.alignment =	DDE_BUFFER_ALIGN,
	.multiple =	DDE_BUFFER_LAST_MULT,
	.minimum =	DDE_BUFFER_LAST_MULT,
	.maximum =	PAGE_SIZE, /* dynamic, max_sync_size */
};

static int check_constraints(unsigned long buf, unsigned int *len, bool in)
{
	if (!IS_ALIGNED(buf, nx842_pseries_constraints.alignment)) {
		pr_debug("%s buffer 0x%lx not aligned to 0x%x\n",
			 in ? "input" : "output", buf,
			 nx842_pseries_constraints.alignment);
		return -EINVAL;
	}
	if (*len % nx842_pseries_constraints.multiple) {
		pr_debug("%s buffer len 0x%x not multiple of 0x%x\n",
			 in ? "input" : "output", *len,
			 nx842_pseries_constraints.multiple);
		if (in)
			return -EINVAL;
		*len = round_down(*len, nx842_pseries_constraints.multiple);
	}
	if (*len < nx842_pseries_constraints.minimum) {
		pr_debug("%s buffer len 0x%x under minimum 0x%x\n",
			 in ? "input" : "output", *len,
			 nx842_pseries_constraints.minimum);
		return -EINVAL;
	}
	if (*len > nx842_pseries_constraints.maximum) {
		pr_debug("%s buffer len 0x%x over maximum 0x%x\n",
			 in ? "input" : "output", *len,
			 nx842_pseries_constraints.maximum);
		if (in)
			return -EINVAL;
		*len = nx842_pseries_constraints.maximum;
	}
	return 0;
}
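
/*
 * Illustrative note (not from the original source): check_constraints()
 * treats input and output buffers differently.  An input length that is
 * not a multiple of DDE_BUFFER_LAST_MULT is rejected outright, while an
 * output length is rounded down to the nearest multiple, and an over-long
 * output buffer is clamped to the current maximum instead of being
 * rejected.  For example, assuming DDE_BUFFER_LAST_MULT were 8, an output
 * *len of 4100 would be adjusted to 4096 before the operation is tried.
 */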

/* I assume we need to align the CSB? */
#define WORKMEM_ALIGN	(256)

struct nx842_workmem {
	/* scatterlist */
	char slin[4096];
	char slout[4096];
	/* coprocessor status/parameter block */
	struct nx_csbcpb csbcpb;

	char padding[WORKMEM_ALIGN];
} __aligned(WORKMEM_ALIGN);

/* Macros for fields within nx_csbcpb */
/* Check the valid bit within the csbcpb valid field */
#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7))

/* CE macros operate on the completion_extension field bits in the csbcpb.
 * CE0 0=full completion, 1=partial completion
 * CE1 0=CE0 indicates completion, 1=termination (output may be modified)
 * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */
#define NX842_CSBCPB_CE0(x)	(x & BIT_MASK(7))
#define NX842_CSBCPB_CE1(x)	(x & BIT_MASK(6))
#define NX842_CSBCPB_CE2(x)	(x & BIT_MASK(5))

/* The NX unit accepts data only on 4K page boundaries */
#define NX842_HW_PAGE_SIZE	(4096)
#define NX842_HW_PAGE_MASK	(~(NX842_HW_PAGE_SIZE-1))

struct ibm_nx842_counters {
	atomic64_t comp_complete;
	atomic64_t comp_failed;
	atomic64_t decomp_complete;
	atomic64_t decomp_failed;
	atomic64_t swdecomp;
	atomic64_t comp_times[32];
	atomic64_t decomp_times[32];
};

static struct nx842_devdata {
	struct vio_dev *vdev;
	struct device *dev;
	struct ibm_nx842_counters *counters;
	unsigned int max_sg_len;
	unsigned int max_sync_size;
	unsigned int max_sync_sg;
} __rcu *devdata;
static DEFINE_SPINLOCK(devdata_mutex);

#define NX842_COUNTER_INC(_x) \
static inline void nx842_inc_##_x( \
	const struct nx842_devdata *dev) { \
	if (dev) \
		atomic64_inc(&dev->counters->_x); \
}
NX842_COUNTER_INC(comp_complete);
NX842_COUNTER_INC(comp_failed);
NX842_COUNTER_INC(decomp_complete);
NX842_COUNTER_INC(decomp_failed);
NX842_COUNTER_INC(swdecomp);

#define NX842_HIST_SLOTS 16

static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
{
	int bucket = fls(time);

	if (bucket)
		bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);

	atomic64_inc(&times[bucket]);
}
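
/*
 * Illustrative note (not from the original source): the histogram buckets
 * above are indexed by fls() of the elapsed time in microseconds, so each
 * bucket covers a power-of-two range.  For example, a 3us operation has
 * fls(3) == 2 and lands in bucket 1 (the 2-3us slot reported by
 * nx842_timehist_show()), while anything taking 2<<(NX842_HIST_SLOTS - 2)
 * us or longer collapses into the final bucket.
 */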

/* NX unit operation flags */
#define NX842_OP_COMPRESS	0x0
#define NX842_OP_CRC		0x1
#define NX842_OP_DECOMPRESS	0x2
#define NX842_OP_COMPRESS_CRC   (NX842_OP_COMPRESS | NX842_OP_CRC)
#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC)
#define NX842_OP_ASYNC		(1<<23)
#define NX842_OP_NOTIFY		(1<<22)
#define NX842_OP_NOTIFY_INT(x)	((x & 0xff)<<8)
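
/*
 * Note (not from the original source): of the operation flags above, this
 * driver only issues the synchronous NX842_OP_COMPRESS_CRC and
 * NX842_OP_DECOMPRESS_CRC forms via vio_h_cop_sync(); the ASYNC and
 * NOTIFY variants are defined here but not used in this file.
 */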

static unsigned long nx842_get_desired_dma(struct vio_dev *viodev)
{
	/* No use of DMA mappings within the driver. */
	return 0;
}

struct nx842_slentry {
	__be64 ptr; /* Real address (use __pa()) */
	__be64 len;
};

/* pHyp scatterlist entry */
struct nx842_scatterlist {
	int entry_nr; /* number of slentries */
	struct nx842_slentry *entries; /* ptr to array of slentries */
};

/* Does not include sizeof(entry_nr) in the size */
static inline unsigned long nx842_get_scatterlist_size(
				struct nx842_scatterlist *sl)
{
	return sl->entry_nr * sizeof(struct nx842_slentry);
}

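/*
 * Descriptive note (not from the original source): nx842_build_scatterlist()
 * below walks the buffer and emits one physically-addressed slentry per
 * chunk, with each chunk capped (via LEN_ON_SIZE()) so that it does not
 * cross an NX842_HW_PAGE_SIZE (4K) boundary.
 */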
static int nx842_build_scatterlist(unsigned long buf, int len,
			struct nx842_scatterlist *sl)
{
	unsigned long entrylen;
	struct nx842_slentry *entry;

	sl->entry_nr = 0;

	entry = sl->entries;
	while (len) {
		entry->ptr = cpu_to_be64(nx842_get_pa((void *)buf));
		entrylen = min_t(int, len,
				 LEN_ON_SIZE(buf, NX842_HW_PAGE_SIZE));
		entry->len = cpu_to_be64(entrylen);

		len -= entrylen;
		buf += entrylen;

		sl->entry_nr++;
		entry++;
	}

	return 0;
}

static int nx842_validate_result(struct device *dev,
	struct cop_status_block *csb)
{
	/* The csb must be valid after returning from vio_h_cop_sync */
	if (!NX842_CSBCBP_VALID_CHK(csb->valid)) {
		dev_err(dev, "%s: csbcpb not valid upon completion.\n",
				__func__);
		dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
				csb->valid,
				csb->crb_seq_number,
				csb->completion_code,
				csb->completion_extension);
		dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
				be32_to_cpu(csb->processed_byte_count),
				(unsigned long)be64_to_cpu(csb->address));
		return -EIO;
	}

	/* Check return values from the hardware in the CSB */
	switch (csb->completion_code) {
	case 0:	/* Completed without error */
		break;
	case 64: /* Compression ok, but output larger than input */
		dev_dbg(dev, "%s: output size larger than input size\n",
					__func__);
		break;
	case 13: /* Output buffer too small */
		dev_dbg(dev, "%s: Out of space in output buffer\n",
					__func__);
		return -ENOSPC;
	case 65: /* Calculated CRC doesn't match the passed value */
		dev_dbg(dev, "%s: CRC mismatch for decompression\n",
					__func__);
		return -EINVAL;
	case 66: /* Input data contains an illegal template field */
	case 67: /* Template indicates data past the end of the input stream */
		dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
					__func__, csb->completion_code);
		return -EINVAL;
	default:
		dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
					__func__, csb->completion_code);
		return -EIO;
	}

	/* Hardware sanity check */
	if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
		dev_err(dev, "%s: No error returned by hardware, but "
				"data returned is unusable, contact support.\n"
				"(Additional info: csbcpb->processed bytes "
				"does not specify processed bytes for the "
				"target buffer.)\n", __func__);
		return -EIO;
	}

	return 0;
}

/**
 * nx842_pseries_compress - Compress data using the 842 algorithm
 *
 * Compression provided by the NX842 coprocessor on IBM Power systems.
 * The input buffer is compressed and the result is stored in the
 * provided output buffer.
 *
 * Upon return from this function @outlen contains the length of the
 * compressed data.  If there is an error then @outlen will be 0 and an
 * error will be specified by the return code from this function.
 *
 * @in: Pointer to input buffer
 * @inlen: Length of input buffer
 * @out: Pointer to output buffer
 * @outlen: Length of output buffer
 * @wmem: ptr to buffer for working memory, size determined by
 *        nx842_pseries_driver.workmem_size
 *
 * Returns:
 *   0		Success, output of length @outlen stored in the buffer at @out
 *   -ENOMEM	Unable to allocate internal buffers
 *   -ENOSPC	Output buffer is too small
 *   -EIO	Internal error
 *   -ENODEV	Hardware unavailable
 */
static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
				  unsigned char *out, unsigned int *outlen,
				  void *wmem)
{
	struct nx842_devdata *local_devdata;
	struct device *dev = NULL;
	struct nx842_workmem *workmem;
	struct nx842_scatterlist slin, slout;
	struct nx_csbcpb *csbcpb;
	int ret = 0;
	unsigned long inbuf, outbuf;
	struct vio_pfo_op op = {
		.done = NULL,
		.handle = 0,
		.timeout = 0,
	};
	unsigned long start = get_tb();

	inbuf = (unsigned long)in;
	if (check_constraints(inbuf, &inlen, true))
		return -EINVAL;

	outbuf = (unsigned long)out;
	if (check_constraints(outbuf, outlen, false))
		return -EINVAL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata || !local_devdata->dev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	dev = local_devdata->dev;

	/* Init scatterlist */
	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
	slin.entries = (struct nx842_slentry *)workmem->slin;
	slout.entries = (struct nx842_slentry *)workmem->slout;

	/* Init operation */
	op.flags = NX842_OP_COMPRESS_CRC;
	csbcpb = &workmem->csbcpb;
	memset(csbcpb, 0, sizeof(*csbcpb));
	op.csbcpb = nx842_get_pa(csbcpb);

	if ((inbuf & NX842_HW_PAGE_MASK) ==
	    ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.in = nx842_get_pa((void *)inbuf);
		op.inlen = inlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(inbuf, inlen, &slin);
		op.in = nx842_get_pa(slin.entries);
		op.inlen = -nx842_get_scatterlist_size(&slin);
	}

	if ((outbuf & NX842_HW_PAGE_MASK) ==
	    ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.out = nx842_get_pa((void *)outbuf);
		op.outlen = *outlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(outbuf, *outlen, &slout);
		op.out = nx842_get_pa(slout.entries);
		op.outlen = -nx842_get_scatterlist_size(&slout);
	}

	dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
		__func__, (unsigned long)op.in, (long)op.inlen,
		(unsigned long)op.out, (long)op.outlen);

	/* Send request to pHyp */
	ret = vio_h_cop_sync(local_devdata->vdev, &op);

	/* Check for pHyp error */
	if (ret) {
		dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
			__func__, ret, op.hcall_err);
		ret = -EIO;
		goto unlock;
	}

	/* Check for hardware error */
	ret = nx842_validate_result(dev, &csbcpb->csb);
	if (ret)
		goto unlock;

	*outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
	dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen);

unlock:
	if (ret)
		nx842_inc_comp_failed(local_devdata);
	else {
		nx842_inc_comp_complete(local_devdata);
		ibm_nx842_incr_hist(local_devdata->counters->comp_times,
			(get_tb() - start) / tb_ticks_per_usec);
	}
	rcu_read_unlock();
	return ret;
}

/**
 * nx842_pseries_decompress - Decompress data using the 842 algorithm
 *
 * Decompression provided by the NX842 coprocessor on IBM Power systems.
 * The input buffer is decompressed and the result is stored in the
 * provided output buffer.  The size allocated to the output buffer is
 * provided by the caller of this function in @outlen.  Upon return from
 * this function @outlen contains the length of the decompressed data.
 * If there is an error then @outlen will be 0 and an error will be
 * specified by the return code from this function.
 *
 * @in: Pointer to input buffer
 * @inlen: Length of input buffer
 * @out: Pointer to output buffer
 * @outlen: Length of output buffer
 * @wmem: ptr to buffer for working memory, size determined by
 *        nx842_pseries_driver.workmem_size
 *
 * Returns:
 *   0		Success, output of length @outlen stored in the buffer at @out
 *   -ENODEV	Hardware decompression device is unavailable
 *   -ENOMEM	Unable to allocate internal buffers
 *   -ENOSPC	Output buffer is too small
 *   -EINVAL	Bad input data encountered when attempting decompress
 *   -EIO	Internal error
 */
static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
				    unsigned char *out, unsigned int *outlen,
				    void *wmem)
{
	struct nx842_devdata *local_devdata;
	struct device *dev = NULL;
	struct nx842_workmem *workmem;
	struct nx842_scatterlist slin, slout;
	struct nx_csbcpb *csbcpb;
	int ret = 0;
	unsigned long inbuf, outbuf;
	struct vio_pfo_op op = {
		.done = NULL,
		.handle = 0,
		.timeout = 0,
	};
	unsigned long start = get_tb();

	/* Ensure page alignment and size */
	inbuf = (unsigned long)in;
	if (check_constraints(inbuf, &inlen, true))
		return -EINVAL;

	outbuf = (unsigned long)out;
	if (check_constraints(outbuf, outlen, false))
		return -EINVAL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata || !local_devdata->dev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	dev = local_devdata->dev;

	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);

	/* Init scatterlist */
	slin.entries = (struct nx842_slentry *)workmem->slin;
	slout.entries = (struct nx842_slentry *)workmem->slout;

	/* Init operation */
	op.flags = NX842_OP_DECOMPRESS_CRC;
	csbcpb = &workmem->csbcpb;
	memset(csbcpb, 0, sizeof(*csbcpb));
	op.csbcpb = nx842_get_pa(csbcpb);

	if ((inbuf & NX842_HW_PAGE_MASK) ==
	    ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.in = nx842_get_pa((void *)inbuf);
		op.inlen = inlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(inbuf, inlen, &slin);
		op.in = nx842_get_pa(slin.entries);
		op.inlen = -nx842_get_scatterlist_size(&slin);
	}

	if ((outbuf & NX842_HW_PAGE_MASK) ==
	    ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.out = nx842_get_pa((void *)outbuf);
		op.outlen = *outlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(outbuf, *outlen, &slout);
		op.out = nx842_get_pa(slout.entries);
		op.outlen = -nx842_get_scatterlist_size(&slout);
	}

	dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
		__func__, (unsigned long)op.in, (long)op.inlen,
		(unsigned long)op.out, (long)op.outlen);

	/* Send request to pHyp */
	ret = vio_h_cop_sync(local_devdata->vdev, &op);

	/* Check for pHyp error */
	if (ret) {
		dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
			__func__, ret, op.hcall_err);
		goto unlock;
	}

	/* Check for hardware error */
	ret = nx842_validate_result(dev, &csbcpb->csb);
	if (ret)
		goto unlock;

	*outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);

unlock:
	if (ret)
		/* decompress fail */
		nx842_inc_decomp_failed(local_devdata);
	else {
		nx842_inc_decomp_complete(local_devdata);
		ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
			(get_tb() - start) / tb_ticks_per_usec);
	}

	rcu_read_unlock();
	return ret;
}

/**
 * nx842_OF_set_defaults -- Set default (disabled) values for devdata
 *
 * @devdata - struct nx842_devdata to update
 *
 * Returns:
 *  0 on success
 *  -ENOENT if @devdata ptr is NULL
 */
static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
{
	if (devdata) {
		devdata->max_sync_size = 0;
		devdata->max_sync_sg = 0;
		devdata->max_sg_len = 0;
		return 0;
	} else
		return -ENOENT;
}

/**
 * nx842_OF_upd_status -- Check the device info from OF status prop
 *
 * The status property indicates if the accelerator is enabled.  If the
 * device is in the OF tree it indicates that the hardware is present.
 * The device is only usable when the status is 'okay'; otherwise the
 * device driver will be disabled.
 *
 * @prop - struct property pointer containing the status for the update
 *
 * Returns:
 *  0 - Device is available
 *  -ENODEV - Device is not available
 *  -EINVAL - Device status is unrecognized
 */
static int nx842_OF_upd_status(struct property *prop)
{
	const char *status = (const char *)prop->value;

	if (!strncmp(status, "okay", (size_t)prop->length))
		return 0;
	if (!strncmp(status, "disabled", (size_t)prop->length))
		return -ENODEV;
	dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);

	return -EINVAL;
}

/**
 * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop
 *
 * Definition of the 'ibm,max-sg-len' OF property:
 *  This field indicates the maximum byte length of a scatter list
 *  for the platform facility. It is a single cell encoded as with encode-int.
 *
 * Example:
 *  # od -x ibm,max-sg-len
 *  0000000 0000 0ff0
 *
 *  In this example, the maximum byte length of a scatter list is
 *  0x0ff0 (4,080).
 *
 * @devdata - struct nx842_devdata to update
 * @prop - struct property pointer containing the maxsglen for the update
 *
 * Returns:
 *  0 on success
 *  -EINVAL on failure
 */
static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
					struct property *prop) {
	int ret = 0;
	const unsigned int maxsglen = of_read_number(prop->value, 1);

	if (prop->length != sizeof(maxsglen)) {
		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
		dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
				prop->length, sizeof(maxsglen));
		ret = -EINVAL;
	} else {
		devdata->max_sg_len = min_t(unsigned int,
					    maxsglen, NX842_HW_PAGE_SIZE);
	}

	return ret;
}

/**
 * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop
 *
 * Definition of the 'ibm,max-sync-cop' OF property:
 *  Two series of cells.  The first series of cells represents the maximums
 *  that can be synchronously compressed. The second series of cells
 *  represents the maximums that can be synchronously decompressed.
 *  1. The first cell in each series contains the count of the number of
 *     data length, scatter list elements pairs that follow – each being
 *     of the form
 *    a. One cell data byte length
 *    b. One cell total number of scatter list elements
 *
 * Example:
 *  # od -x ibm,max-sync-cop
 *  0000000 0000 0001 0000 1000 0000 01fe 0000 0001
 *  0000020 0000 1000 0000 01fe
 *
 *  In this example, compression supports 0x1000 (4,096) data byte length
 *  and 0x1fe (510) total scatter list elements.  Decompression supports
 *  0x1000 (4,096) data byte length and 0x1fe (510) total scatter list
 *  elements.
 *
 * @devdata - struct nx842_devdata to update
 * @prop - struct property pointer containing the maxsyncop for the update
 *
 * Returns:
 *  0 on success
 *  -EINVAL on failure
 */
static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
					struct property *prop) {
	int ret = 0;
	unsigned int comp_data_limit, decomp_data_limit;
	unsigned int comp_sg_limit, decomp_sg_limit;
	const struct maxsynccop_t {
		__be32 comp_elements;
		__be32 comp_data_limit;
		__be32 comp_sg_limit;
		__be32 decomp_elements;
		__be32 decomp_data_limit;
		__be32 decomp_sg_limit;
	} *maxsynccop;

	if (prop->length != sizeof(*maxsynccop)) {
		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
		dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
				sizeof(*maxsynccop));
		ret = -EINVAL;
		goto out;
	}

	maxsynccop = (const struct maxsynccop_t *)prop->value;
	comp_data_limit = be32_to_cpu(maxsynccop->comp_data_limit);
	comp_sg_limit = be32_to_cpu(maxsynccop->comp_sg_limit);
	decomp_data_limit = be32_to_cpu(maxsynccop->decomp_data_limit);
	decomp_sg_limit = be32_to_cpu(maxsynccop->decomp_sg_limit);

	/* Use one limit rather than separate limits for compression and
	 * decompression. Set a maximum for this so as not to exceed the
	 * size that the header can support and round the value down to
	 * the hardware page size (4K) */
	devdata->max_sync_size = min(comp_data_limit, decomp_data_limit);

	devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
					65536);

	if (devdata->max_sync_size < 4096) {
		dev_err(devdata->dev, "%s: hardware max data size (%u) is "
				"less than the driver minimum, unable to use "
				"the hardware device\n",
				__func__, devdata->max_sync_size);
		ret = -EINVAL;
		goto out;
	}

	nx842_pseries_constraints.maximum = devdata->max_sync_size;

	devdata->max_sync_sg = min(comp_sg_limit, decomp_sg_limit);
	if (devdata->max_sync_sg < 1) {
		dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
				"less than the driver minimum, unable to use "
				"the hardware device\n",
				__func__, devdata->max_sync_sg);
		ret = -EINVAL;
		goto out;
	}

out:
	return ret;
}

/**
 * nx842_OF_upd -- Handle OF properties updates for the device.
 *
 * Set all properties from the OF tree.  Optionally, a new property
 * can be provided by the @new_prop pointer to overwrite an existing value.
 * The device will remain disabled until all values are valid; this function
 * will return an error for updates unless all values are valid.
 *
 * @new_prop: If not NULL, this property is being updated.  If NULL, update
 *  all properties from the current values in the OF tree.
 *
 * Returns:
 *  0 - Success
 *  -ENOMEM - Could not allocate memory for new devdata structure
 *  -EINVAL - property value not found, new_prop is not a recognized
 *	property for the device or property value is not valid.
 *  -ENODEV - Device is not available
 */
static int nx842_OF_upd(struct property *new_prop)
{
	struct nx842_devdata *old_devdata = NULL;
	struct nx842_devdata *new_devdata = NULL;
	struct device_node *of_node = NULL;
	struct property *status = NULL;
	struct property *maxsglen = NULL;
	struct property *maxsyncop = NULL;
	int ret = 0;
	unsigned long flags;

	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata)
		return -ENOMEM;

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	if (old_devdata)
		of_node = old_devdata->dev->of_node;

	if (!old_devdata || !of_node) {
		pr_err("%s: device is not available\n", __func__);
		spin_unlock_irqrestore(&devdata_mutex, flags);
		kfree(new_devdata);
		return -ENODEV;
	}

	memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
	new_devdata->counters = old_devdata->counters;

	/* Set ptrs for existing properties */
	status = of_find_property(of_node, "status", NULL);
	maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
	maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
	if (!status || !maxsglen || !maxsyncop) {
		dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
		ret = -EINVAL;
		goto error_out;
	}

	/*
	 * If this is a property update, there are only certain properties that
	 * we care about. Bail if it isn't in the below list
	 */
	if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) &&
		         strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) &&
		         strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length)))
		goto out;

	/* Perform property updates */
	ret = nx842_OF_upd_status(status);
	if (ret)
		goto error_out;

	ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
	if (ret)
		goto error_out;

	ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
	if (ret)
		goto error_out;

out:
	dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
			__func__, new_devdata->max_sync_size,
			old_devdata->max_sync_size);
	dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
			__func__, new_devdata->max_sync_sg,
			old_devdata->max_sync_sg);
	dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
			__func__, new_devdata->max_sg_len,
			old_devdata->max_sg_len);

	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	dev_set_drvdata(new_devdata->dev, new_devdata);
	kfree(old_devdata);
	return 0;

error_out:
	if (new_devdata) {
		dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
		nx842_OF_set_defaults(new_devdata);
		rcu_assign_pointer(devdata, new_devdata);
		spin_unlock_irqrestore(&devdata_mutex, flags);
		synchronize_rcu();
		dev_set_drvdata(new_devdata->dev, new_devdata);
		kfree(old_devdata);
	} else {
		dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
		spin_unlock_irqrestore(&devdata_mutex, flags);
	}

	if (!ret)
		ret = -EINVAL;
	return ret;
}

/**
 * nx842_OF_notifier - Process updates to OF properties for the device
 *
 * @np: notifier block
 * @action: notifier action
 * @data: struct of_reconfig_data pointer if action is
 *	OF_RECONFIG_UPDATE_PROPERTY
 *
 * Returns:
 *	NOTIFY_OK on success
 *	NOTIFY_BAD encoded with error number on failure, use
 *		notifier_to_errno() to decode this value
 */
static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
			     void *data)
{
	struct of_reconfig_data *upd = data;
	struct nx842_devdata *local_devdata;
	struct device_node *node = NULL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (local_devdata)
		node = local_devdata->dev->of_node;

	if (local_devdata &&
			action == OF_RECONFIG_UPDATE_PROPERTY &&
			!strcmp(upd->dn->name, node->name)) {
		rcu_read_unlock();
		nx842_OF_upd(upd->prop);
	} else
		rcu_read_unlock();

	return NOTIFY_OK;
}

static struct notifier_block nx842_of_nb = {
	.notifier_call = nx842_OF_notifier,
};

#define nx842_counter_read(_name)					\
static ssize_t nx842_##_name##_show(struct device *dev,		\
		struct device_attribute *attr,				\
		char *buf) {						\
	struct nx842_devdata *local_devdata;			\
	int p = 0;							\
	rcu_read_lock();						\
	local_devdata = rcu_dereference(devdata);			\
	if (local_devdata)						\
		p = snprintf(buf, PAGE_SIZE, "%lld\n",			\
		       atomic64_read(&local_devdata->counters->_name));	\
	rcu_read_unlock();						\
	return p;							\
}

#define NX842DEV_COUNTER_ATTR_RO(_name)					\
	nx842_counter_read(_name);					\
	static struct device_attribute dev_attr_##_name = __ATTR(_name,	\
						0444,			\
						nx842_##_name##_show,\
						NULL);

NX842DEV_COUNTER_ATTR_RO(comp_complete);
NX842DEV_COUNTER_ATTR_RO(comp_failed);
NX842DEV_COUNTER_ATTR_RO(decomp_complete);
NX842DEV_COUNTER_ATTR_RO(decomp_failed);
NX842DEV_COUNTER_ATTR_RO(swdecomp);

static ssize_t nx842_timehist_show(struct device *,
		struct device_attribute *, char *);

static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444,
		nx842_timehist_show, NULL);
static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times,
		0444, nx842_timehist_show, NULL);

static ssize_t nx842_timehist_show(struct device *dev,
		struct device_attribute *attr, char *buf) {
	char *p = buf;
	struct nx842_devdata *local_devdata;
	atomic64_t *times;
	int bytes_remain = PAGE_SIZE;
	int bytes;
	int i;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata) {
		rcu_read_unlock();
		return 0;
	}

	if (attr == &dev_attr_comp_times)
		times = local_devdata->counters->comp_times;
	else if (attr == &dev_attr_decomp_times)
		times = local_devdata->counters->decomp_times;
	else {
		rcu_read_unlock();
		return 0;
	}

	for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
		bytes = snprintf(p, bytes_remain, "%u-%uus:\t%lld\n",
			       i ? (2<<(i-1)) : 0, (2<<i)-1,
			       atomic64_read(&times[i]));
		bytes_remain -= bytes;
		p += bytes;
	}
	/* The last bucket holds everything over
	 * 2<<(NX842_HIST_SLOTS - 2) us */
	bytes = snprintf(p, bytes_remain, "%uus - :\t%lld\n",
			2<<(NX842_HIST_SLOTS - 2),
			atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
	p += bytes;

	rcu_read_unlock();
	return p - buf;
}

static struct attribute *nx842_sysfs_entries[] = {
	&dev_attr_comp_complete.attr,
	&dev_attr_comp_failed.attr,
	&dev_attr_decomp_complete.attr,
	&dev_attr_decomp_failed.attr,
	&dev_attr_swdecomp.attr,
	&dev_attr_comp_times.attr,
	&dev_attr_decomp_times.attr,
	NULL,
};

static struct attribute_group nx842_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = nx842_sysfs_entries,
};
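
/*
 * Note (not from the original source): because .name is NULL above, these
 * attributes are created directly in the VIO device's own sysfs directory,
 * typically visible under /sys/bus/vio/devices/<unit-address>/.
 */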

static struct nx842_driver nx842_pseries_driver = {
	.name =		KBUILD_MODNAME,
	.owner =	THIS_MODULE,
	.workmem_size =	sizeof(struct nx842_workmem),
	.constraints =	&nx842_pseries_constraints,
	.compress =	nx842_pseries_compress,
	.decompress =	nx842_pseries_decompress,
};
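
/*
 * Note (not from the original source): this descriptor is handed to the
 * generic nx-842 crypto glue via nx842_crypto_init() below, which
 * presumably uses the constraints and workmem_size above when splitting
 * requests and sizing per-transform working memory.
 */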

static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
{
	return nx842_crypto_init(tfm, &nx842_pseries_driver);
}

static struct crypto_alg nx842_pseries_alg = {
	.cra_name		= "842",
	.cra_driver_name	= "842-nx",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= nx842_pseries_crypto_init,
	.cra_exit		= nx842_crypto_exit,
	.cra_u			= { .compress = {
	.coa_compress		= nx842_crypto_compress,
	.coa_decompress		= nx842_crypto_decompress } }
};

static int nx842_probe(struct vio_dev *viodev,
		       const struct vio_device_id *id)
{
	struct nx842_devdata *old_devdata, *new_devdata = NULL;
	unsigned long flags;
	int ret = 0;

	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata)
		return -ENOMEM;

	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
			GFP_NOFS);
	if (!new_devdata->counters) {
		kfree(new_devdata);
		return -ENOMEM;
	}

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));

	if (old_devdata && old_devdata->vdev != NULL) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
		ret = -1;
		goto error_unlock;
	}

	dev_set_drvdata(&viodev->dev, NULL);

	new_devdata->vdev = viodev;
	new_devdata->dev = &viodev->dev;
	nx842_OF_set_defaults(new_devdata);

	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	kfree(old_devdata);

	of_reconfig_notifier_register(&nx842_of_nb);

	ret = nx842_OF_upd(NULL);
	if (ret)
		goto error;

	ret = crypto_register_alg(&nx842_pseries_alg);
	if (ret) {
		dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
		goto error;
	}

	rcu_read_lock();
	dev_set_drvdata(&viodev->dev, rcu_dereference(devdata));
	rcu_read_unlock();

	if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) {
		dev_err(&viodev->dev, "could not create sysfs device attributes\n");
		ret = -1;
		goto error;
	}

	return 0;

error_unlock:
	spin_unlock_irqrestore(&devdata_mutex, flags);
	if (new_devdata)
		kfree(new_devdata->counters);
	kfree(new_devdata);
error:
	return ret;
}

static int nx842_remove(struct vio_dev *viodev)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	pr_info("Removing IBM Power 842 compression device\n");
	sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	of_reconfig_notifier_unregister(&nx842_of_nb);
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	dev_set_drvdata(&viodev->dev, NULL);
	if (old_devdata)
		kfree(old_devdata->counters);
	kfree(old_devdata);

	return 0;
}

static const struct vio_device_id nx842_vio_driver_ids[] = {
	{"ibm,compression-v1", "ibm,compression"},
	{"", ""},
};

static struct vio_driver nx842_vio_driver = {
	.name = KBUILD_MODNAME,
	.probe = nx842_probe,
	.remove = nx842_remove,
	.get_desired_dma = nx842_get_desired_dma,
	.id_table = nx842_vio_driver_ids,
};

static int __init nx842_pseries_init(void)
{
	struct nx842_devdata *new_devdata;
	int ret;

	if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
		return -ENODEV;

	RCU_INIT_POINTER(devdata, NULL);
	new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
	if (!new_devdata)
		return -ENOMEM;

	RCU_INIT_POINTER(devdata, new_devdata);

	ret = vio_register_driver(&nx842_vio_driver);
	if (ret) {
		pr_err("Could not register VIO driver %d\n", ret);

		kfree(new_devdata);
		return ret;
	}

	return 0;
}

module_init(nx842_pseries_init);

static void __exit nx842_pseries_exit(void)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	if (old_devdata && old_devdata->dev)
		dev_set_drvdata(old_devdata->dev, NULL);
	kfree(old_devdata);
	vio_unregister_driver(&nx842_vio_driver);
}

module_exit(nx842_pseries_exit);