1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Arm Firmware Framework for ARMv8-A (FFA) interface driver
4  *
5  * The Arm FFA specification [1] describes a software architecture that
6  * leverages the virtualization extension to isolate software images
7  * provided by an ecosystem of vendors from each other. It also describes
8  * interfaces that standardize communication between the various software
9  * images, including communication between images in the Secure world and
10  * the Normal world. Any hypervisor could use the FFA interfaces to enable
11  * communication between the VMs it manages.
12  *
13  * The Hypervisor, a.k.a. the Partition Manager in FFA terminology, can
14  * assign system resources (memory regions, devices, CPU cycles) to the
15  * partitions and manage isolation amongst them.
16  *
17  * [1] https://developer.arm.com/docs/den0077/latest
18  *
19  * Copyright (C) 2021 ARM Ltd.
20  */
21 
22 #define DRIVER_NAME "ARM FF-A"
23 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
24 
25 #include <linux/acpi.h>
26 #include <linux/arm_ffa.h>
27 #include <linux/bitfield.h>
28 #include <linux/cpuhotplug.h>
29 #include <linux/delay.h>
30 #include <linux/device.h>
31 #include <linux/hashtable.h>
32 #include <linux/interrupt.h>
33 #include <linux/io.h>
34 #include <linux/kernel.h>
35 #include <linux/module.h>
36 #include <linux/mm.h>
37 #include <linux/mutex.h>
38 #include <linux/of_irq.h>
39 #include <linux/scatterlist.h>
40 #include <linux/slab.h>
41 #include <linux/smp.h>
42 #include <linux/uuid.h>
43 #include <linux/xarray.h>
44 
45 #include "common.h"
46 
47 #define FFA_DRIVER_VERSION	FFA_VERSION_1_2
48 #define FFA_MIN_VERSION		FFA_VERSION_1_0
49 
50 #define SENDER_ID_MASK		GENMASK(31, 16)
51 #define RECEIVER_ID_MASK	GENMASK(15, 0)
52 #define SENDER_ID(x)		((u16)(FIELD_GET(SENDER_ID_MASK, (x))))
53 #define RECEIVER_ID(x)		((u16)(FIELD_GET(RECEIVER_ID_MASK, (x))))
54 #define PACK_TARGET_INFO(s, r)		\
55 	(FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))
56 
57 #define RXTX_MAP_MIN_BUFSZ_MASK	GENMASK(1, 0)
58 #define RXTX_MAP_MIN_BUFSZ(x)	((x) & RXTX_MAP_MIN_BUFSZ_MASK)
59 
60 #define FFA_MAX_NOTIFICATIONS		64
61 
62 static ffa_fn *invoke_ffa_fn;
63 
64 static const int ffa_linux_errmap[] = {
65 	/* better than a switch-case as long as the return values are contiguous */
66 	0,		/* FFA_RET_SUCCESS */
67 	-EOPNOTSUPP,	/* FFA_RET_NOT_SUPPORTED */
68 	-EINVAL,	/* FFA_RET_INVALID_PARAMETERS */
69 	-ENOMEM,	/* FFA_RET_NO_MEMORY */
70 	-EBUSY,		/* FFA_RET_BUSY */
71 	-EINTR,		/* FFA_RET_INTERRUPTED */
72 	-EACCES,	/* FFA_RET_DENIED */
73 	-EAGAIN,	/* FFA_RET_RETRY */
74 	-ECANCELED,	/* FFA_RET_ABORTED */
75 	-ENODATA,	/* FFA_RET_NO_DATA */
76 	-EAGAIN,	/* FFA_RET_NOT_READY */
77 };
78 
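/*
 * Map an FF-A error code (a negative value as per the specification) onto
 * the closest Linux errno; anything outside the table maps to -EINVAL.
 */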
79 static inline int ffa_to_linux_errno(int errno)
80 {
81 	int err_idx = -errno;
82 
83 	if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap))
84 		return ffa_linux_errmap[err_idx];
85 	return -EINVAL;
86 }
87 
88 struct ffa_pcpu_irq {
89 	struct ffa_drv_info *info;
90 };
91 
92 struct ffa_drv_info {
93 	u32 version;
94 	u16 vm_id;
95 	struct mutex rx_lock; /* lock to protect Rx buffer */
96 	struct mutex tx_lock; /* lock to protect Tx buffer */
97 	void *rx_buffer;
98 	void *tx_buffer;
99 	size_t rxtx_bufsz;
100 	bool mem_ops_native;
101 	bool msg_direct_req2_supp;
102 	bool bitmap_created;
103 	bool notif_enabled;
104 	unsigned int sched_recv_irq;
105 	unsigned int notif_pend_irq;
106 	unsigned int cpuhp_state;
107 	struct ffa_pcpu_irq __percpu *irq_pcpu;
108 	struct workqueue_struct *notif_pcpu_wq;
109 	struct work_struct notif_pcpu_work;
110 	struct work_struct sched_recv_irq_work;
111 	struct xarray partition_info;
112 	DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
113 	struct mutex notify_lock; /* lock to protect notifier hashtable  */
114 };
115 
116 static struct ffa_drv_info *drv_info;
117 
118 /*
119  * The driver must be able to support all the versions from the earliest
120  * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
121  * The specification states that if the firmware supports an FFA
122  * implementation that is incompatible with, and at a greater version number
123  * than, the one specified by the caller (FFA_DRIVER_VERSION passed as a
124  * parameter to FFA_VERSION), it must return the NOT_SUPPORTED error code.
125  */
126 static u32 ffa_compatible_version_find(u32 version)
127 {
128 	u16 major = FFA_MAJOR_VERSION(version), minor = FFA_MINOR_VERSION(version);
129 	u16 drv_major = FFA_MAJOR_VERSION(FFA_DRIVER_VERSION);
130 	u16 drv_minor = FFA_MINOR_VERSION(FFA_DRIVER_VERSION);
131 
132 	if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
133 		return version;
134 
135 	pr_info("Firmware version higher than driver version, downgrading\n");
136 	return FFA_DRIVER_VERSION;
137 }
138 
139 static int ffa_version_check(u32 *version)
140 {
141 	ffa_value_t ver;
142 
143 	invoke_ffa_fn((ffa_value_t){
144 		      .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
145 		      }, &ver);
146 
147 	if ((s32)ver.a0 == FFA_RET_NOT_SUPPORTED) {
148 		pr_info("FFA_VERSION returned not supported\n");
149 		return -EOPNOTSUPP;
150 	}
151 
152 	if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) {
153 		pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n",
154 		       FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
155 		       FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
156 		       FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
157 		return -EINVAL;
158 	}
159 
160 	if (ver.a0 < FFA_MIN_VERSION) {
161 		pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
162 		       FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
163 		       FFA_MAJOR_VERSION(FFA_MIN_VERSION),
164 		       FFA_MINOR_VERSION(FFA_MIN_VERSION));
165 		return -EINVAL;
166 	}
167 
168 	pr_info("Driver version %d.%d\n", FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
169 		FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
170 	pr_info("Firmware version %d.%d found\n", FFA_MAJOR_VERSION(ver.a0),
171 		FFA_MINOR_VERSION(ver.a0));
172 	*version = ffa_compatible_version_find(ver.a0);
173 
174 	return 0;
175 }
176 
177 static int ffa_rx_release(void)
178 {
179 	ffa_value_t ret;
180 
181 	invoke_ffa_fn((ffa_value_t){
182 		      .a0 = FFA_RX_RELEASE,
183 		      }, &ret);
184 
185 	if (ret.a0 == FFA_ERROR)
186 		return ffa_to_linux_errno((int)ret.a2);
187 
188 	/* check for ret.a0 == FFA_RX_RELEASE ? */
189 
190 	return 0;
191 }
192 
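/*
 * Map the Tx/Rx buffer pair with the partition manager. Both buffers must be
 * of the same size and pg_cnt expresses that size in 4KB FFA pages.
 */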
193 static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
194 {
195 	ffa_value_t ret;
196 
197 	invoke_ffa_fn((ffa_value_t){
198 		      .a0 = FFA_FN_NATIVE(RXTX_MAP),
199 		      .a1 = tx_buf, .a2 = rx_buf, .a3 = pg_cnt,
200 		      }, &ret);
201 
202 	if (ret.a0 == FFA_ERROR)
203 		return ffa_to_linux_errno((int)ret.a2);
204 
205 	return 0;
206 }
207 
208 static int ffa_rxtx_unmap(u16 vm_id)
209 {
210 	ffa_value_t ret;
211 
212 	invoke_ffa_fn((ffa_value_t){
213 		      .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
214 		      }, &ret);
215 
216 	if (ret.a0 == FFA_ERROR)
217 		return ffa_to_linux_errno((int)ret.a2);
218 
219 	return 0;
220 }
221 
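/*
 * Query optional interface/feature support via FFA_FEATURES. Returns 0 if
 * the function or feature ID is implemented, optionally reporting the two
 * interface property registers. input_props is only meaningful when querying
 * a function ID (i.e. a fast SMCCC call).
 */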
222 static int ffa_features(u32 func_feat_id, u32 input_props,
223 			u32 *if_props_1, u32 *if_props_2)
224 {
225 	ffa_value_t id;
226 
227 	if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
228 		pr_err("%s: Invalid Parameters: %x, %x\n", __func__,
229 		       func_feat_id, input_props);
230 		return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
231 	}
232 
233 	invoke_ffa_fn((ffa_value_t){
234 		.a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
235 		}, &id);
236 
237 	if (id.a0 == FFA_ERROR)
238 		return ffa_to_linux_errno((int)id.a2);
239 
240 	if (if_props_1)
241 		*if_props_1 = id.a2;
242 	if (if_props_2)
243 		*if_props_2 = id.a3;
244 
245 	return 0;
246 }
247 
248 #define PARTITION_INFO_GET_RETURN_COUNT_ONLY	BIT(0)
249 
250 /* buffer must be at least sizeof(struct ffa_partition_info) * num_partitions in size */
251 static int
252 __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
253 			 struct ffa_partition_info *buffer, int num_partitions)
254 {
255 	int idx, count, flags = 0, sz, buf_sz;
256 	ffa_value_t partition_info;
257 
258 	if (drv_info->version > FFA_VERSION_1_0 &&
259 	    (!buffer || !num_partitions)) /* Just get the count for now */
260 		flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;
261 
262 	mutex_lock(&drv_info->rx_lock);
263 	invoke_ffa_fn((ffa_value_t){
264 		      .a0 = FFA_PARTITION_INFO_GET,
265 		      .a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
266 		      .a5 = flags,
267 		      }, &partition_info);
268 
269 	if (partition_info.a0 == FFA_ERROR) {
270 		mutex_unlock(&drv_info->rx_lock);
271 		return ffa_to_linux_errno((int)partition_info.a2);
272 	}
273 
274 	count = partition_info.a2;
275 
276 	if (drv_info->version > FFA_VERSION_1_0) {
277 		buf_sz = sz = partition_info.a3;
278 		if (sz > sizeof(*buffer))
279 			buf_sz = sizeof(*buffer);
280 	} else {
281 		/* FFA_VERSION_1_0 lacks size in the response */
282 		buf_sz = sz = 8;
283 	}
284 
285 	if (buffer && count <= num_partitions)
286 		for (idx = 0; idx < count; idx++) {
287 			struct ffa_partition_info_le {
288 				__le16 id;
289 				__le16 exec_ctxt;
290 				__le32 properties;
291 				uuid_t uuid;
292 			} *rx_buf = drv_info->rx_buffer + idx * sz;
293 			struct ffa_partition_info *buf = buffer + idx;
294 
295 			buf->id = le16_to_cpu(rx_buf->id);
296 			buf->exec_ctxt = le16_to_cpu(rx_buf->exec_ctxt);
297 			buf->properties = le32_to_cpu(rx_buf->properties);
298 			if (buf_sz > 8)
299 				import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid);
300 		}
301 
302 	if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY))
303 		ffa_rx_release();
304 
305 	mutex_unlock(&drv_info->rx_lock);
306 
307 	return count;
308 }
309 
310 #define LAST_INDEX_MASK		GENMASK(15, 0)
311 #define CURRENT_INDEX_MASK	GENMASK(31, 16)
312 #define UUID_INFO_TAG_MASK	GENMASK(47, 32)
313 #define PARTITION_INFO_SZ_MASK	GENMASK(63, 48)
314 #define PARTITION_COUNT(x)	((u16)(FIELD_GET(LAST_INDEX_MASK, (x))) + 1)
315 #define CURRENT_INDEX(x)	((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x))))
316 #define UUID_INFO_TAG(x)	((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x))))
317 #define PARTITION_INFO_SZ(x)	((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x))))
318 #define PART_INFO_ID_MASK	GENMASK(15, 0)
319 #define PART_INFO_EXEC_CXT_MASK	GENMASK(31, 16)
320 #define PART_INFO_PROPS_MASK	GENMASK(63, 32)
321 #define PART_INFO_ID(x)		((u16)(FIELD_GET(PART_INFO_ID_MASK, (x))))
322 #define PART_INFO_EXEC_CXT(x)	((u16)(FIELD_GET(PART_INFO_EXEC_CXT_MASK, (x))))
323 #define PART_INFO_PROPERTIES(x)	((u32)(FIELD_GET(PART_INFO_PROPS_MASK, (x))))
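/*
 * Variant of the partition discovery using FFA_PARTITION_INFO_GET_REGS: the
 * information is returned packed in registers (three 64-bit registers per
 * entry starting at a3) instead of the Rx buffer, and the call is repeated
 * until all reported partitions have been read. As above, a NULL buffer or
 * zero num_parts only retrieves the partition count.
 */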
324 static int
325 __ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
326 			      struct ffa_partition_info *buffer, int num_parts)
327 {
328 	u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0;
329 	struct ffa_partition_info *buf = buffer;
330 	ffa_value_t partition_info;
331 
332 	do {
333 		__le64 *regs;
334 		int idx;
335 
336 		start_idx = prev_idx ? prev_idx + 1 : 0;
337 
338 		invoke_ffa_fn((ffa_value_t){
339 			      .a0 = FFA_PARTITION_INFO_GET_REGS,
340 			      .a1 = (u64)uuid1 << 32 | uuid0,
341 			      .a2 = (u64)uuid3 << 32 | uuid2,
342 			      .a3 = start_idx | tag << 16,
343 			      }, &partition_info);
344 
345 		if (partition_info.a0 == FFA_ERROR)
346 			return ffa_to_linux_errno((int)partition_info.a2);
347 
348 		if (!count)
349 			count = PARTITION_COUNT(partition_info.a2);
350 		if (!buffer || !num_parts) /* count only */
351 			return count;
352 
353 		cur_idx = CURRENT_INDEX(partition_info.a2);
354 		tag = UUID_INFO_TAG(partition_info.a2);
355 		buf_sz = PARTITION_INFO_SZ(partition_info.a2);
356 		if (buf_sz > sizeof(*buffer))
357 			buf_sz = sizeof(*buffer);
358 
359 		regs = (void *)&partition_info.a3;
360 		for (idx = 0; idx < cur_idx - start_idx + 1; idx++, buf++) {
361 			union {
362 				uuid_t uuid;
363 				u64 regs[2];
364 			} uuid_regs = {
365 				.regs = {
366 					le64_to_cpu(*(regs + 1)),
367 					le64_to_cpu(*(regs + 2)),
368 					}
369 			};
370 			u64 val = *(u64 *)regs;
371 
372 			buf->id = PART_INFO_ID(val);
373 			buf->exec_ctxt = PART_INFO_EXEC_CXT(val);
374 			buf->properties = PART_INFO_PROPERTIES(val);
375 			uuid_copy(&buf->uuid, &uuid_regs.uuid);
376 			regs += 3;
377 		}
378 		prev_idx = cur_idx;
379 
380 	} while (cur_idx < (count - 1));
381 
382 	return count;
383 }
384 
385 /* buffer is allocated here and the caller must free it if the returned count > 0 */
386 static int
387 ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
388 {
389 	int count;
390 	u32 uuid0_4[4];
391 	bool reg_mode = false;
392 	struct ffa_partition_info *pbuf;
393 
394 	if (!ffa_features(FFA_PARTITION_INFO_GET_REGS, 0, NULL, NULL))
395 		reg_mode = true;
396 
397 	export_uuid((u8 *)uuid0_4, uuid);
398 	if (reg_mode)
399 		count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1],
400 						      uuid0_4[2], uuid0_4[3],
401 						      NULL, 0);
402 	else
403 		count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1],
404 						 uuid0_4[2], uuid0_4[3],
405 						 NULL, 0);
406 	if (count <= 0)
407 		return count;
408 
409 	pbuf = kcalloc(count, sizeof(*pbuf), GFP_KERNEL);
410 	if (!pbuf)
411 		return -ENOMEM;
412 
413 	if (reg_mode)
414 		count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1],
415 						      uuid0_4[2], uuid0_4[3],
416 						      pbuf, count);
417 	else
418 		count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1],
419 						 uuid0_4[2], uuid0_4[3],
420 						 pbuf, count);
421 	if (count <= 0)
422 		kfree(pbuf);
423 	else
424 		*buffer = pbuf;
425 
426 	return count;
427 }
428 
429 #define VM_ID_MASK	GENMASK(15, 0)
430 static int ffa_id_get(u16 *vm_id)
431 {
432 	ffa_value_t id;
433 
434 	invoke_ffa_fn((ffa_value_t){
435 		      .a0 = FFA_ID_GET,
436 		      }, &id);
437 
438 	if (id.a0 == FFA_ERROR)
439 		return ffa_to_linux_errno((int)id.a2);
440 
441 	*vm_id = FIELD_GET(VM_ID_MASK, (id.a2));
442 
443 	return 0;
444 }
445 
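/*
 * Keep re-entering the target endpoint with FFA_RUN for as long as it
 * returns FFA_INTERRUPT or FFA_YIELD, backing off for 1ms on FFA_YIELD,
 * until a final response or an error is received.
 */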
446 static inline void ffa_msg_send_wait_for_completion(ffa_value_t *ret)
447 {
448 	while (ret->a0 == FFA_INTERRUPT || ret->a0 == FFA_YIELD) {
449 		if (ret->a0 == FFA_YIELD)
450 			fsleep(1000);
451 
452 		invoke_ffa_fn((ffa_value_t){
453 			      .a0 = FFA_RUN, .a1 = ret->a1,
454 			      }, ret);
455 	}
456 }
457 
458 static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
459 				   struct ffa_send_direct_data *data)
460 {
461 	u32 req_id, resp_id, src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
462 	ffa_value_t ret;
463 
464 	if (mode_32bit) {
465 		req_id = FFA_MSG_SEND_DIRECT_REQ;
466 		resp_id = FFA_MSG_SEND_DIRECT_RESP;
467 	} else {
468 		req_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_REQ);
469 		resp_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_RESP);
470 	}
471 
472 	invoke_ffa_fn((ffa_value_t){
473 		      .a0 = req_id, .a1 = src_dst_ids, .a2 = 0,
474 		      .a3 = data->data0, .a4 = data->data1, .a5 = data->data2,
475 		      .a6 = data->data3, .a7 = data->data4,
476 		      }, &ret);
477 
478 	ffa_msg_send_wait_for_completion(&ret);
479 
480 	if (ret.a0 == FFA_ERROR)
481 		return ffa_to_linux_errno((int)ret.a2);
482 
483 	if (ret.a0 == resp_id) {
484 		data->data0 = ret.a3;
485 		data->data1 = ret.a4;
486 		data->data2 = ret.a5;
487 		data->data3 = ret.a6;
488 		data->data4 = ret.a7;
489 		return 0;
490 	}
491 
492 	return -EINVAL;
493 }
494 
495 static int ffa_msg_send2(struct ffa_device *dev, u16 src_id, void *buf, size_t sz)
496 {
497 	u32 src_dst_ids = PACK_TARGET_INFO(src_id, dev->vm_id);
498 	struct ffa_indirect_msg_hdr *msg;
499 	ffa_value_t ret;
500 	int retval = 0;
501 
502 	if (sz > (drv_info->rxtx_bufsz - sizeof(*msg)))
503 		return -ERANGE;
504 
505 	mutex_lock(&drv_info->tx_lock);
506 
507 	msg = drv_info->tx_buffer;
508 	msg->flags = 0;
509 	msg->res0 = 0;
510 	msg->offset = sizeof(*msg);
511 	msg->send_recv_id = src_dst_ids;
512 	msg->size = sz;
513 	uuid_copy(&msg->uuid, &dev->uuid);
514 	memcpy((u8 *)msg + msg->offset, buf, sz);
515 
516 	/* flags = 0, sender VMID = 0 works for both physical/virtual NS */
517 	invoke_ffa_fn((ffa_value_t){
518 		      .a0 = FFA_MSG_SEND2, .a1 = 0, .a2 = 0
519 		      }, &ret);
520 
521 	if (ret.a0 == FFA_ERROR)
522 		retval = ffa_to_linux_errno((int)ret.a2);
523 
524 	mutex_unlock(&drv_info->tx_lock);
525 	return retval;
526 }
527 
528 static int ffa_msg_send_direct_req2(u16 src_id, u16 dst_id, const uuid_t *uuid,
529 				    struct ffa_send_direct_data2 *data)
530 {
531 	u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
532 	union {
533 		uuid_t uuid;
534 		__le64 regs[2];
535 	} uuid_regs = { .uuid = *uuid };
536 	ffa_value_t ret, args = {
537 		.a0 = FFA_MSG_SEND_DIRECT_REQ2,
538 		.a1 = src_dst_ids,
539 		.a2 = le64_to_cpu(uuid_regs.regs[0]),
540 		.a3 = le64_to_cpu(uuid_regs.regs[1]),
541 	};
542 	memcpy((void *)&args + offsetof(ffa_value_t, a4), data, sizeof(*data));
543 
544 	invoke_ffa_fn(args, &ret);
545 
546 	ffa_msg_send_wait_for_completion(&ret);
547 
548 	if (ret.a0 == FFA_ERROR)
549 		return ffa_to_linux_errno((int)ret.a2);
550 
551 	if (ret.a0 == FFA_MSG_SEND_DIRECT_RESP2) {
552 		memcpy(data, (void *)&ret + offsetof(ffa_value_t, a4), sizeof(*data));
553 		return 0;
554 	}
555 
556 	return -EINVAL;
557 }
558 
559 static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
560 			      u32 frag_len, u32 len, u64 *handle)
561 {
562 	ffa_value_t ret;
563 
564 	invoke_ffa_fn((ffa_value_t){
565 		      .a0 = func_id, .a1 = len, .a2 = frag_len,
566 		      .a3 = buf, .a4 = buf_sz,
567 		      }, &ret);
568 
569 	while (ret.a0 == FFA_MEM_OP_PAUSE)
570 		invoke_ffa_fn((ffa_value_t){
571 			      .a0 = FFA_MEM_OP_RESUME,
572 			      .a1 = ret.a1, .a2 = ret.a2,
573 			      }, &ret);
574 
575 	if (ret.a0 == FFA_ERROR)
576 		return ffa_to_linux_errno((int)ret.a2);
577 
578 	if (ret.a0 == FFA_SUCCESS) {
579 		if (handle)
580 			*handle = PACK_HANDLE(ret.a2, ret.a3);
581 	} else if (ret.a0 == FFA_MEM_FRAG_RX) {
582 		if (handle)
583 			*handle = PACK_HANDLE(ret.a1, ret.a2);
584 	} else {
585 		return -EOPNOTSUPP;
586 	}
587 
588 	return frag_len;
589 }
590 
591 static int ffa_mem_next_frag(u64 handle, u32 frag_len)
592 {
593 	ffa_value_t ret;
594 
595 	invoke_ffa_fn((ffa_value_t){
596 		      .a0 = FFA_MEM_FRAG_TX,
597 		      .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle),
598 		      .a3 = frag_len,
599 		      }, &ret);
600 
601 	while (ret.a0 == FFA_MEM_OP_PAUSE)
602 		invoke_ffa_fn((ffa_value_t){
603 			      .a0 = FFA_MEM_OP_RESUME,
604 			      .a1 = ret.a1, .a2 = ret.a2,
605 			      }, &ret);
606 
607 	if (ret.a0 == FFA_ERROR)
608 		return ffa_to_linux_errno((int)ret.a2);
609 
610 	if (ret.a0 == FFA_MEM_FRAG_RX)
611 		return ret.a3;
612 	else if (ret.a0 == FFA_SUCCESS)
613 		return 0;
614 
615 	return -EOPNOTSUPP;
616 }
617 
618 static int
619 ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
620 		      u32 len, u64 *handle, bool first)
621 {
622 	if (!first)
623 		return ffa_mem_next_frag(*handle, frag_len);
624 
625 	return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle);
626 }
627 
628 static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
629 {
630 	u32 num_pages = 0;
631 
632 	do {
633 		num_pages += sg->length / FFA_PAGE_SIZE;
634 	} while ((sg = sg_next(sg)));
635 
636 	return num_pages;
637 }
638 
639 static u16 ffa_memory_attributes_get(u32 func_id)
640 {
641 	/*
642 	 * For the memory lend or donate operation, if the receiver is a PE or
643 	 * a proxy endpoint, the owner/sender must not specify the memory attributes.
644 	 */
645 	if (func_id == FFA_FN_NATIVE(MEM_LEND) ||
646 	    func_id == FFA_MEM_LEND)
647 		return 0;
648 
649 	return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
650 }
651 
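/*
 * Construct the memory region descriptor for the share/lend request in
 * 'buffer' (either the Tx buffer or a separately allocated area) and
 * transmit it, splitting the constituent address ranges across
 * FFA_MEM_FRAG_TX fragments whenever the descriptor grows beyond
 * max_fragsize.
 */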
652 static int
653 ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
654 		       struct ffa_mem_ops_args *args)
655 {
656 	int rc = 0;
657 	bool first = true;
658 	u32 composite_offset;
659 	phys_addr_t addr = 0;
660 	struct ffa_mem_region *mem_region = buffer;
661 	struct ffa_composite_mem_region *composite;
662 	struct ffa_mem_region_addr_range *constituents;
663 	struct ffa_mem_region_attributes *ep_mem_access;
664 	u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);
665 
666 	mem_region->tag = args->tag;
667 	mem_region->flags = args->flags;
668 	mem_region->sender_id = drv_info->vm_id;
669 	mem_region->attributes = ffa_memory_attributes_get(func_id);
670 	ep_mem_access = buffer +
671 			ffa_mem_desc_offset(buffer, 0, drv_info->version);
672 	composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
673 					       drv_info->version);
674 
675 	for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
676 		ep_mem_access->receiver = args->attrs[idx].receiver;
677 		ep_mem_access->attrs = args->attrs[idx].attrs;
678 		ep_mem_access->composite_off = composite_offset;
679 		ep_mem_access->flag = 0;
680 		ep_mem_access->reserved = 0;
681 	}
682 	mem_region->handle = 0;
683 	mem_region->ep_count = args->nattrs;
684 	if (drv_info->version <= FFA_VERSION_1_0) {
685 		mem_region->ep_mem_size = 0;
686 	} else {
687 		mem_region->ep_mem_size = sizeof(*ep_mem_access);
688 		mem_region->ep_mem_offset = sizeof(*mem_region);
689 		memset(mem_region->reserved, 0, 12);
690 	}
691 
692 	composite = buffer + composite_offset;
693 	composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
694 	composite->addr_range_cnt = num_entries;
695 	composite->reserved = 0;
696 
697 	length = composite_offset + CONSTITUENTS_OFFSET(num_entries);
698 	frag_len = composite_offset + CONSTITUENTS_OFFSET(0);
699 	if (frag_len > max_fragsize)
700 		return -ENXIO;
701 
702 	if (!args->use_txbuf) {
703 		addr = virt_to_phys(buffer);
704 		buf_sz = max_fragsize / FFA_PAGE_SIZE;
705 	}
706 
707 	constituents = buffer + frag_len;
708 	idx = 0;
709 	do {
710 		if (frag_len == max_fragsize) {
711 			rc = ffa_transmit_fragment(func_id, addr, buf_sz,
712 						   frag_len, length,
713 						   &args->g_handle, first);
714 			if (rc < 0)
715 				return -ENXIO;
716 
717 			first = false;
718 			idx = 0;
719 			frag_len = 0;
720 			constituents = buffer;
721 		}
722 
723 		if ((void *)constituents - buffer > max_fragsize) {
724 			pr_err("Memory Region Fragment > Tx Buffer size\n");
725 			return -EFAULT;
726 		}
727 
728 		constituents->address = sg_phys(args->sg);
729 		constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
730 		constituents->reserved = 0;
731 		constituents++;
732 		frag_len += sizeof(struct ffa_mem_region_addr_range);
733 	} while ((args->sg = sg_next(args->sg)));
734 
735 	return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
736 				     length, &args->g_handle, first);
737 }
738 
739 static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
740 {
741 	int ret;
742 	void *buffer;
743 	size_t rxtx_bufsz = drv_info->rxtx_bufsz;
744 
745 	if (!args->use_txbuf) {
746 		buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
747 		if (!buffer)
748 			return -ENOMEM;
749 	} else {
750 		buffer = drv_info->tx_buffer;
751 		mutex_lock(&drv_info->tx_lock);
752 	}
753 
754 	ret = ffa_setup_and_transmit(func_id, buffer, rxtx_bufsz, args);
755 
756 	if (args->use_txbuf)
757 		mutex_unlock(&drv_info->tx_lock);
758 	else
759 		free_pages_exact(buffer, rxtx_bufsz);
760 
761 	return ret < 0 ? ret : 0;
762 }
763 
764 static int ffa_memory_reclaim(u64 g_handle, u32 flags)
765 {
766 	ffa_value_t ret;
767 
768 	invoke_ffa_fn((ffa_value_t){
769 		      .a0 = FFA_MEM_RECLAIM,
770 		      .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle),
771 		      .a3 = flags,
772 		      }, &ret);
773 
774 	if (ret.a0 == FFA_ERROR)
775 		return ffa_to_linux_errno((int)ret.a2);
776 
777 	return 0;
778 }
779 
780 static int ffa_notification_bitmap_create(void)
781 {
782 	ffa_value_t ret;
783 	u16 vcpu_count = nr_cpu_ids;
784 
785 	invoke_ffa_fn((ffa_value_t){
786 		      .a0 = FFA_NOTIFICATION_BITMAP_CREATE,
787 		      .a1 = drv_info->vm_id, .a2 = vcpu_count,
788 		      }, &ret);
789 
790 	if (ret.a0 == FFA_ERROR)
791 		return ffa_to_linux_errno((int)ret.a2);
792 
793 	return 0;
794 }
795 
796 static int ffa_notification_bitmap_destroy(void)
797 {
798 	ffa_value_t ret;
799 
800 	invoke_ffa_fn((ffa_value_t){
801 		      .a0 = FFA_NOTIFICATION_BITMAP_DESTROY,
802 		      .a1 = drv_info->vm_id,
803 		      }, &ret);
804 
805 	if (ret.a0 == FFA_ERROR)
806 		return ffa_to_linux_errno((int)ret.a2);
807 
808 	return 0;
809 }
810 
811 enum notify_type {
812 	SECURE_PARTITION,
813 	NON_SECURE_VM,
814 	SPM_FRAMEWORK,
815 	NS_HYP_FRAMEWORK,
816 };
817 
818 #define NOTIFICATION_LOW_MASK		GENMASK(31, 0)
819 #define NOTIFICATION_HIGH_MASK		GENMASK(63, 32)
820 #define NOTIFICATION_BITMAP_HIGH(x)	\
821 		((u32)(FIELD_GET(NOTIFICATION_HIGH_MASK, (x))))
822 #define NOTIFICATION_BITMAP_LOW(x)	\
823 		((u32)(FIELD_GET(NOTIFICATION_LOW_MASK, (x))))
824 #define PACK_NOTIFICATION_BITMAP(low, high)	\
825 	(FIELD_PREP(NOTIFICATION_LOW_MASK, (low)) | \
826 	 FIELD_PREP(NOTIFICATION_HIGH_MASK, (high)))
827 
828 #define RECEIVER_VCPU_MASK		GENMASK(31, 16)
829 #define PACK_NOTIFICATION_GET_RECEIVER_INFO(vcpu_r, r) \
830 	(FIELD_PREP(RECEIVER_VCPU_MASK, (vcpu_r)) | \
831 	 FIELD_PREP(RECEIVER_ID_MASK, (r)))
832 
833 #define NOTIFICATION_INFO_GET_MORE_PEND_MASK	BIT(0)
834 #define NOTIFICATION_INFO_GET_ID_COUNT		GENMASK(11, 7)
835 #define ID_LIST_MASK_64				GENMASK(51, 12)
836 #define ID_LIST_MASK_32				GENMASK(31, 12)
837 #define MAX_IDS_64				20
838 #define MAX_IDS_32				10
839 
840 #define PER_VCPU_NOTIFICATION_FLAG		BIT(0)
841 #define SECURE_PARTITION_BITMAP_ENABLE		BIT(SECURE_PARTITION)
842 #define NON_SECURE_VM_BITMAP_ENABLE		BIT(NON_SECURE_VM)
843 #define SPM_FRAMEWORK_BITMAP_ENABLE		BIT(SPM_FRAMEWORK)
844 #define NS_HYP_FRAMEWORK_BITMAP_ENABLE		BIT(NS_HYP_FRAMEWORK)
845 #define FFA_BITMAP_SECURE_ENABLE_MASK		\
846 	(SECURE_PARTITION_BITMAP_ENABLE | SPM_FRAMEWORK_BITMAP_ENABLE)
847 #define FFA_BITMAP_NS_ENABLE_MASK		\
848 	(NON_SECURE_VM_BITMAP_ENABLE | NS_HYP_FRAMEWORK_BITMAP_ENABLE)
849 #define FFA_BITMAP_ALL_ENABLE_MASK		\
850 	(FFA_BITMAP_SECURE_ENABLE_MASK | FFA_BITMAP_NS_ENABLE_MASK)
851 
852 #define FFA_SECURE_PARTITION_ID_FLAG		BIT(15)
853 
854 #define SPM_FRAMEWORK_BITMAP(x)			NOTIFICATION_BITMAP_LOW(x)
855 #define NS_HYP_FRAMEWORK_BITMAP(x)		NOTIFICATION_BITMAP_HIGH(x)
856 #define FRAMEWORK_NOTIFY_RX_BUFFER_FULL		BIT(0)
857 
858 static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
859 					u32 flags, bool is_bind)
860 {
861 	ffa_value_t ret;
862 	u32 func, src_dst_ids = PACK_TARGET_INFO(dst_id, drv_info->vm_id);
863 
864 	func = is_bind ? FFA_NOTIFICATION_BIND : FFA_NOTIFICATION_UNBIND;
865 
866 	invoke_ffa_fn((ffa_value_t){
867 		  .a0 = func, .a1 = src_dst_ids, .a2 = flags,
868 		  .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
869 		  .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
870 		  }, &ret);
871 
872 	if (ret.a0 == FFA_ERROR)
873 		return ffa_to_linux_errno((int)ret.a2);
874 	else if (ret.a0 != FFA_SUCCESS)
875 		return -EINVAL;
876 
877 	return 0;
878 }
879 
880 static
881 int ffa_notification_set(u16 src_id, u16 dst_id, u32 flags, u64 bitmap)
882 {
883 	ffa_value_t ret;
884 	u32 src_dst_ids = PACK_TARGET_INFO(dst_id, src_id);
885 
886 	invoke_ffa_fn((ffa_value_t) {
887 		  .a0 = FFA_NOTIFICATION_SET, .a1 = src_dst_ids, .a2 = flags,
888 		  .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
889 		  .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
890 		  }, &ret);
891 
892 	if (ret.a0 == FFA_ERROR)
893 		return ffa_to_linux_errno((int)ret.a2);
894 	else if (ret.a0 != FFA_SUCCESS)
895 		return -EINVAL;
896 
897 	return 0;
898 }
899 
900 struct ffa_notify_bitmaps {
901 	u64 sp_map;
902 	u64 vm_map;
903 	u64 arch_map;
904 };
905 
906 static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
907 {
908 	ffa_value_t ret;
909 	u16 src_id = drv_info->vm_id;
910 	u16 cpu_id = smp_processor_id();
911 	u32 rec_vcpu_ids = PACK_NOTIFICATION_GET_RECEIVER_INFO(cpu_id, src_id);
912 
913 	invoke_ffa_fn((ffa_value_t){
914 		  .a0 = FFA_NOTIFICATION_GET, .a1 = rec_vcpu_ids, .a2 = flags,
915 		  }, &ret);
916 
917 	if (ret.a0 == FFA_ERROR)
918 		return ffa_to_linux_errno((int)ret.a2);
919 	else if (ret.a0 != FFA_SUCCESS)
920 		return -EINVAL; /* Something else went wrong. */
921 
922 	if (flags & SECURE_PARTITION_BITMAP_ENABLE)
923 		notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
924 	if (flags & NON_SECURE_VM_BITMAP_ENABLE)
925 		notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
926 	if (flags & SPM_FRAMEWORK_BITMAP_ENABLE)
927 		notify->arch_map = SPM_FRAMEWORK_BITMAP(ret.a6);
928 	if (flags & NS_HYP_FRAMEWORK_BITMAP_ENABLE)
929 		notify->arch_map = PACK_NOTIFICATION_BITMAP(notify->arch_map,
930 							    ret.a7);
931 
932 	return 0;
933 }
934 
935 struct ffa_dev_part_info {
936 	ffa_sched_recv_cb callback;
937 	void *cb_data;
938 	rwlock_t rw_lock;
939 	struct ffa_device *dev;
940 	struct list_head node;
941 };
942 
943 static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
944 {
945 	struct ffa_dev_part_info *partition = NULL, *tmp;
946 	ffa_sched_recv_cb callback;
947 	struct list_head *phead;
948 	void *cb_data;
949 
950 	phead = xa_load(&drv_info->partition_info, part_id);
951 	if (!phead) {
952 		pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
953 		return;
954 	}
955 
956 	list_for_each_entry_safe(partition, tmp, phead, node) {
957 		read_lock(&partition->rw_lock);
958 		callback = partition->callback;
959 		cb_data = partition->cb_data;
960 		read_unlock(&partition->rw_lock);
961 
962 		if (callback)
963 			callback(vcpu, is_per_vcpu, cb_data);
964 	}
965 }
966 
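/*
 * Retrieve the list of endpoints with pending notifications. Each response
 * packs up to MAX_IDS_64 (or MAX_IDS_32 for a 32-bit response) 16-bit IDs
 * into the registers starting at a3: every list holds a partition ID
 * optionally followed by vCPU IDs, for which the schedule receiver callbacks
 * are invoked. The call is repeated while the 'more pending' flag is set.
 */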
967 static void ffa_notification_info_get(void)
968 {
969 	int idx, list, max_ids, lists_cnt, ids_processed, ids_count[MAX_IDS_64];
970 	bool is_64b_resp;
971 	ffa_value_t ret;
972 	u64 id_list;
973 
974 	do {
975 		invoke_ffa_fn((ffa_value_t){
976 			  .a0 = FFA_FN_NATIVE(NOTIFICATION_INFO_GET),
977 			  }, &ret);
978 
979 		if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
980 			if ((s32)ret.a2 != FFA_RET_NO_DATA)
981 				pr_err("Notification Info fetch failed: 0x%lx (0x%lx)\n",
982 				       ret.a0, ret.a2);
983 			return;
984 		}
985 
986 		is_64b_resp = (ret.a0 == FFA_FN64_SUCCESS);
987 
988 		ids_processed = 0;
989 		lists_cnt = FIELD_GET(NOTIFICATION_INFO_GET_ID_COUNT, ret.a2);
990 		if (is_64b_resp) {
991 			max_ids = MAX_IDS_64;
992 			id_list = FIELD_GET(ID_LIST_MASK_64, ret.a2);
993 		} else {
994 			max_ids = MAX_IDS_32;
995 			id_list = FIELD_GET(ID_LIST_MASK_32, ret.a2);
996 		}
997 
998 		for (idx = 0; idx < lists_cnt; idx++, id_list >>= 2)
999 			ids_count[idx] = (id_list & 0x3) + 1;
1000 
1001 		/* Process IDs */
1002 		for (list = 0; list < lists_cnt; list++) {
1003 			u16 vcpu_id, part_id, *packed_id_list = (u16 *)&ret.a3;
1004 
1005 			if (ids_processed >= max_ids - 1)
1006 				break;
1007 
1008 			part_id = packed_id_list[ids_processed++];
1009 
1010 			if (ids_count[list] == 1) { /* Global Notification */
1011 				__do_sched_recv_cb(part_id, 0, false);
1012 				continue;
1013 			}
1014 
1015 			/* Per vCPU Notification */
1016 			for (idx = 1; idx < ids_count[list]; idx++) {
1017 				if (ids_processed >= max_ids - 1)
1018 					break;
1019 
1020 				vcpu_id = packed_id_list[ids_processed++];
1021 
1022 				__do_sched_recv_cb(part_id, vcpu_id, true);
1023 			}
1024 		}
1025 	} while (ret.a2 & NOTIFICATION_INFO_GET_MORE_PEND_MASK);
1026 }
1027 
1028 static int ffa_run(struct ffa_device *dev, u16 vcpu)
1029 {
1030 	ffa_value_t ret;
1031 	u32 target = dev->vm_id << 16 | vcpu;
1032 
1033 	invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = target, }, &ret);
1034 
1035 	while (ret.a0 == FFA_INTERRUPT)
1036 		invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = ret.a1, },
1037 			      &ret);
1038 
1039 	if (ret.a0 == FFA_ERROR)
1040 		return ffa_to_linux_errno((int)ret.a2);
1041 
1042 	return 0;
1043 }
1044 
1045 static void ffa_drvinfo_flags_init(void)
1046 {
1047 	if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
1048 	    !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
1049 		drv_info->mem_ops_native = true;
1050 
1051 	if (!ffa_features(FFA_MSG_SEND_DIRECT_REQ2, 0, NULL, NULL) ||
1052 	    !ffa_features(FFA_MSG_SEND_DIRECT_RESP2, 0, NULL, NULL))
1053 		drv_info->msg_direct_req2_supp = true;
1054 }
1055 
1056 static u32 ffa_api_version_get(void)
1057 {
1058 	return drv_info->version;
1059 }
1060 
1061 static int ffa_partition_info_get(const char *uuid_str,
1062 				  struct ffa_partition_info *buffer)
1063 {
1064 	int count;
1065 	uuid_t uuid;
1066 	struct ffa_partition_info *pbuf;
1067 
1068 	if (uuid_parse(uuid_str, &uuid)) {
1069 		pr_err("invalid uuid (%s)\n", uuid_str);
1070 		return -ENODEV;
1071 	}
1072 
1073 	count = ffa_partition_probe(&uuid, &pbuf);
1074 	if (count <= 0)
1075 		return -ENOENT;
1076 
1077 	memcpy(buffer, pbuf, sizeof(*pbuf) * count);
1078 	kfree(pbuf);
1079 	return 0;
1080 }
1081 
1082 static void ffa_mode_32bit_set(struct ffa_device *dev)
1083 {
1084 	dev->mode_32bit = true;
1085 }
1086 
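/*
 * An illustrative usage sketch (not part of this driver): a client driver
 * bound to an FF-A device could issue a synchronous direct request through
 * the exported ops roughly as below, where the command value placed in data0
 * is just an assumed example of some partition specific protocol:
 *
 *	struct ffa_send_direct_data data = { .data0 = 0x1 };
 *	int rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
 *
 * On success the partition's response registers are copied back into 'data'.
 */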
1087 static int ffa_sync_send_receive(struct ffa_device *dev,
1088 				 struct ffa_send_direct_data *data)
1089 {
1090 	return ffa_msg_send_direct_req(drv_info->vm_id, dev->vm_id,
1091 				       dev->mode_32bit, data);
1092 }
1093 
1094 static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
1095 {
1096 	return ffa_msg_send2(dev, drv_info->vm_id, buf, sz);
1097 }
1098 
1099 static int ffa_sync_send_receive2(struct ffa_device *dev,
1100 				  struct ffa_send_direct_data2 *data)
1101 {
1102 	if (!drv_info->msg_direct_req2_supp)
1103 		return -EOPNOTSUPP;
1104 
1105 	return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id,
1106 					&dev->uuid, data);
1107 }
1108 
1109 static int ffa_memory_share(struct ffa_mem_ops_args *args)
1110 {
1111 	if (drv_info->mem_ops_native)
1112 		return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
1113 
1114 	return ffa_memory_ops(FFA_MEM_SHARE, args);
1115 }
1116 
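/*
 * An illustrative usage sketch (not part of this driver), loosely following
 * how in-kernel FF-A clients lend memory; 'sgt' is assumed to be an already
 * mapped scatter-gather table covering the pages lent to the partition
 * backing 'ffa_dev':
 *
 *	struct ffa_mem_region_attributes mem_attr = {
 *		.receiver = ffa_dev->vm_id,
 *		.attrs = FFA_MEM_RW,
 *	};
 *	struct ffa_mem_ops_args args = {
 *		.use_txbuf = true,
 *		.attrs = &mem_attr,
 *		.nattrs = 1,
 *		.sg = sgt->sgl,
 *	};
 *	int rc = ffa_dev->ops->mem_ops->memory_lend(&args);
 *
 * On success, args.g_handle holds the global handle to pass to a later
 * memory_reclaim() once the receiver has relinquished the memory.
 */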
1117 static int ffa_memory_lend(struct ffa_mem_ops_args *args)
1118 {
1119 	/* Note that upon a successful MEM_LEND request the caller
1120 	 * must ensure that the memory region specified is not accessed
1121 	 * until a successful MEM_RECLAIM call has been made.
1122 	 * On systems with a hypervisor present this will be enforced,
1123 	 * however on systems without a hypervisor the responsibility
1124 	 * falls to the calling kernel driver to prevent access.
1125 	 */
1126 	if (drv_info->mem_ops_native)
1127 		return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);
1128 
1129 	return ffa_memory_ops(FFA_MEM_LEND, args);
1130 }
1131 
1132 #define ffa_notifications_disabled()	(!drv_info->notif_enabled)
1133 
1134 struct notifier_cb_info {
1135 	struct hlist_node hnode;
1136 	struct ffa_device *dev;
1137 	ffa_fwk_notifier_cb fwk_cb;
1138 	ffa_notifier_cb cb;
1139 	void *cb_data;
1140 };
1141 
1142 static int
1143 ffa_sched_recv_cb_update(struct ffa_device *dev, ffa_sched_recv_cb callback,
1144 			 void *cb_data, bool is_registration)
1145 {
1146 	struct ffa_dev_part_info *partition = NULL, *tmp;
1147 	struct list_head *phead;
1148 	bool cb_valid;
1149 
1150 	if (ffa_notifications_disabled())
1151 		return -EOPNOTSUPP;
1152 
1153 	phead = xa_load(&drv_info->partition_info, dev->vm_id);
1154 	if (!phead) {
1155 		pr_err("%s: Invalid partition ID 0x%x\n", __func__, dev->vm_id);
1156 		return -EINVAL;
1157 	}
1158 
1159 	list_for_each_entry_safe(partition, tmp, phead, node)
1160 		if (partition->dev == dev)
1161 			break;
1162 
1163 	if (!partition) {
1164 		pr_err("%s: No such partition ID 0x%x\n", __func__, dev->vm_id);
1165 		return -EINVAL;
1166 	}
1167 
1168 	write_lock(&partition->rw_lock);
1169 
1170 	cb_valid = !!partition->callback;
1171 	if (!(is_registration ^ cb_valid)) {
1172 		write_unlock(&partition->rw_lock);
1173 		return -EINVAL;
1174 	}
1175 
1176 	partition->callback = callback;
1177 	partition->cb_data = cb_data;
1178 
1179 	write_unlock(&partition->rw_lock);
1180 	return 0;
1181 }
1182 
1183 static int ffa_sched_recv_cb_register(struct ffa_device *dev,
1184 				      ffa_sched_recv_cb cb, void *cb_data)
1185 {
1186 	return ffa_sched_recv_cb_update(dev, cb, cb_data, true);
1187 }
1188 
1189 static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
1190 {
1191 	return ffa_sched_recv_cb_update(dev, NULL, NULL, false);
1192 }
1193 
1194 static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
1195 {
1196 	return ffa_notification_bind_common(dst_id, bitmap, flags, true);
1197 }
1198 
1199 static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
1200 {
1201 	return ffa_notification_bind_common(dst_id, bitmap, 0, false);
1202 }
1203 
1204 static enum notify_type ffa_notify_type_get(u16 vm_id)
1205 {
1206 	if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
1207 		return SECURE_PARTITION;
1208 	else
1209 		return NON_SECURE_VM;
1210 }
1211 
1212 /* notifier_hnode_get* should be called with notify_lock held */
1213 static struct notifier_cb_info *
1214 notifier_hnode_get_by_vmid(u16 notify_id, int vmid)
1215 {
1216 	struct notifier_cb_info *node;
1217 
1218 	hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
1219 		if (node->fwk_cb && vmid == node->dev->vm_id)
1220 			return node;
1221 
1222 	return NULL;
1223 }
1224 
1225 static struct notifier_cb_info *
1226 notifier_hnode_get_by_vmid_uuid(u16 notify_id, int vmid, const uuid_t *uuid)
1227 {
1228 	struct notifier_cb_info *node;
1229 
1230 	if (uuid_is_null(uuid))
1231 		return notifier_hnode_get_by_vmid(notify_id, vmid);
1232 
1233 	hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
1234 		if (node->fwk_cb && vmid == node->dev->vm_id &&
1235 		    uuid_equal(&node->dev->uuid, uuid))
1236 			return node;
1237 
1238 	return NULL;
1239 }
1240 
1241 static struct notifier_cb_info *
1242 notifier_hnode_get_by_type(u16 notify_id, enum notify_type type)
1243 {
1244 	struct notifier_cb_info *node;
1245 
1246 	hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
1247 		if (node->cb && type == ffa_notify_type_get(node->dev->vm_id))
1248 			return node;
1249 
1250 	return NULL;
1251 }
1252 
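/*
 * Add or remove an entry in the notifier hashtable for the given device and
 * notification ID. Registration fails if a matching entry already exists,
 * unregistration fails if none does. Must be called with notify_lock held.
 */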
1253 static int
1254 update_notifier_cb(struct ffa_device *dev, int notify_id, void *cb,
1255 		   void *cb_data, bool is_registration, bool is_framework)
1256 {
1257 	struct notifier_cb_info *cb_info = NULL;
1258 	enum notify_type type = ffa_notify_type_get(dev->vm_id);
1259 	bool cb_found;
1260 
1261 	if (is_framework)
1262 		cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, dev->vm_id,
1263 							  &dev->uuid);
1264 	else
1265 		cb_info = notifier_hnode_get_by_type(notify_id, type);
1266 
1267 	cb_found = !!cb_info;
1268 
1269 	if (!(is_registration ^ cb_found))
1270 		return -EINVAL;
1271 
1272 	if (is_registration) {
1273 		cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
1274 		if (!cb_info)
1275 			return -ENOMEM;
1276 
1277 		cb_info->dev = dev;
1278 		cb_info->cb_data = cb_data;
1279 		if (is_framework)
1280 			cb_info->fwk_cb = cb;
1281 		else
1282 			cb_info->cb = cb;
1283 
1284 		hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
1285 	} else {
1286 		hash_del(&cb_info->hnode);
1287 	}
1288 
1289 	return 0;
1290 }
1291 
1292 static int __ffa_notify_relinquish(struct ffa_device *dev, int notify_id,
1293 				   bool is_framework)
1294 {
1295 	int rc;
1296 
1297 	if (ffa_notifications_disabled())
1298 		return -EOPNOTSUPP;
1299 
1300 	if (notify_id >= FFA_MAX_NOTIFICATIONS)
1301 		return -EINVAL;
1302 
1303 	mutex_lock(&drv_info->notify_lock);
1304 
1305 	rc = update_notifier_cb(dev, notify_id, NULL, NULL, false,
1306 				is_framework);
1307 	if (rc) {
1308 		pr_err("Could not unregister notification callback\n");
1309 		mutex_unlock(&drv_info->notify_lock);
1310 		return rc;
1311 	}
1312 
1313 	if (!is_framework)
1314 		rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
1315 
1316 	mutex_unlock(&drv_info->notify_lock);
1317 
1318 	return rc;
1319 }
1320 
1321 static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
1322 {
1323 	return __ffa_notify_relinquish(dev, notify_id, false);
1324 }
1325 
1326 static int ffa_fwk_notify_relinquish(struct ffa_device *dev, int notify_id)
1327 {
1328 	return __ffa_notify_relinquish(dev, notify_id, true);
1329 }
1330 
1331 static int __ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
1332 				void *cb, void *cb_data,
1333 				int notify_id, bool is_framework)
1334 {
1335 	int rc;
1336 	u32 flags = 0;
1337 
1338 	if (ffa_notifications_disabled())
1339 		return -EOPNOTSUPP;
1340 
1341 	if (notify_id >= FFA_MAX_NOTIFICATIONS)
1342 		return -EINVAL;
1343 
1344 	mutex_lock(&drv_info->notify_lock);
1345 
1346 	if (!is_framework) {
1347 		if (is_per_vcpu)
1348 			flags = PER_VCPU_NOTIFICATION_FLAG;
1349 
1350 		rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
1351 		if (rc) {
1352 			mutex_unlock(&drv_info->notify_lock);
1353 			return rc;
1354 		}
1355 	}
1356 
1357 	rc = update_notifier_cb(dev, notify_id, cb, cb_data, true,
1358 				is_framework);
1359 	if (rc) {
1360 		pr_err("Failed to register callback for %d - %d\n",
1361 		       notify_id, rc);
1362 		if (!is_framework)
1363 			ffa_notification_unbind(dev->vm_id, BIT(notify_id));
1364 	}
1365 	mutex_unlock(&drv_info->notify_lock);
1366 
1367 	return rc;
1368 }
1369 
1370 static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
1371 			      ffa_notifier_cb cb, void *cb_data, int notify_id)
1372 {
1373 	return __ffa_notify_request(dev, is_per_vcpu, cb, cb_data, notify_id,
1374 				    false);
1375 }
1376 
1377 static int
1378 ffa_fwk_notify_request(struct ffa_device *dev, ffa_fwk_notifier_cb cb,
1379 		       void *cb_data, int notify_id)
1380 {
1381 	return __ffa_notify_request(dev, false, cb, cb_data, notify_id, true);
1382 }
1383 
1384 static int ffa_notify_send(struct ffa_device *dev, int notify_id,
1385 			   bool is_per_vcpu, u16 vcpu)
1386 {
1387 	u32 flags = 0;
1388 
1389 	if (ffa_notifications_disabled())
1390 		return -EOPNOTSUPP;
1391 
1392 	if (is_per_vcpu)
1393 		flags |= (PER_VCPU_NOTIFICATION_FLAG | vcpu << 16);
1394 
1395 	return ffa_notification_set(dev->vm_id, drv_info->vm_id, flags,
1396 				    BIT(notify_id));
1397 }
1398 
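/*
 * Walk the pending notification bitmap and invoke the registered callback,
 * if any, for every set bit matching the given notification type.
 */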
1399 static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
1400 {
1401 	int notify_id;
1402 	struct notifier_cb_info *cb_info = NULL;
1403 
1404 	for (notify_id = 0; notify_id <= FFA_MAX_NOTIFICATIONS && bitmap;
1405 	     notify_id++, bitmap >>= 1) {
1406 		if (!(bitmap & 1))
1407 			continue;
1408 
1409 		mutex_lock(&drv_info->notify_lock);
1410 		cb_info = notifier_hnode_get_by_type(notify_id, type);
1411 		mutex_unlock(&drv_info->notify_lock);
1412 
1413 		if (cb_info && cb_info->cb)
1414 			cb_info->cb(notify_id, cb_info->cb_data);
1415 	}
1416 }
1417 
1418 static void handle_fwk_notif_callbacks(u32 bitmap)
1419 {
1420 	void *buf;
1421 	uuid_t uuid;
1422 	int notify_id = 0, target;
1423 	struct ffa_indirect_msg_hdr *msg;
1424 	struct notifier_cb_info *cb_info = NULL;
1425 
1426 	/* Only one framework notification defined and supported for now */
1427 	if (!(bitmap & FRAMEWORK_NOTIFY_RX_BUFFER_FULL))
1428 		return;
1429 
1430 	mutex_lock(&drv_info->rx_lock);
1431 
1432 	msg = drv_info->rx_buffer;
1433 	buf = kmemdup((void *)msg + msg->offset, msg->size, GFP_KERNEL);
1434 	if (!buf) {
1435 		mutex_unlock(&drv_info->rx_lock);
1436 		return;
1437 	}
1438 
1439 	target = SENDER_ID(msg->send_recv_id);
1440 	if (msg->offset >= sizeof(*msg))
1441 		uuid_copy(&uuid, &msg->uuid);
1442 	else
1443 		uuid_copy(&uuid, &uuid_null);
1444 
1445 	mutex_unlock(&drv_info->rx_lock);
1446 
1447 	ffa_rx_release();
1448 
1449 	mutex_lock(&drv_info->notify_lock);
1450 	cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, target, &uuid);
1451 	mutex_unlock(&drv_info->notify_lock);
1452 
1453 	if (cb_info && cb_info->fwk_cb)
1454 		cb_info->fwk_cb(notify_id, cb_info->cb_data, buf);
1455 	kfree(buf);
1456 }
1457 
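/*
 * Fetch all pending notification bitmaps for the current vCPU and dispatch
 * them to the framework, VM and secure partition handlers. The non-secure
 * physical instance (vm_id == 0) only requests the secure world bitmaps,
 * while a virtual instance requests all of them.
 */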
1458 static void notif_get_and_handle(void *cb_data)
1459 {
1460 	int rc;
1461 	u32 flags;
1462 	struct ffa_drv_info *info = cb_data;
1463 	struct ffa_notify_bitmaps bitmaps = { 0 };
1464 
1465 	if (info->vm_id == 0) /* Non secure physical instance */
1466 		flags = FFA_BITMAP_SECURE_ENABLE_MASK;
1467 	else
1468 		flags = FFA_BITMAP_ALL_ENABLE_MASK;
1469 
1470 	rc = ffa_notification_get(flags, &bitmaps);
1471 	if (rc) {
1472 		pr_err("Failed to retrieve notifications with %d!\n", rc);
1473 		return;
1474 	}
1475 
1476 	handle_fwk_notif_callbacks(SPM_FRAMEWORK_BITMAP(bitmaps.arch_map));
1477 	handle_fwk_notif_callbacks(NS_HYP_FRAMEWORK_BITMAP(bitmaps.arch_map));
1478 	handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
1479 	handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
1480 }
1481 
1482 static void
1483 ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data)
1484 {
1485 	struct ffa_drv_info *info = cb_data;
1486 
1487 	if (!is_per_vcpu)
1488 		notif_get_and_handle(info);
1489 	else
1490 		smp_call_function_single(vcpu, notif_get_and_handle, info, 0);
1491 }
1492 
1493 static void notif_pcpu_irq_work_fn(struct work_struct *work)
1494 {
1495 	struct ffa_drv_info *info = container_of(work, struct ffa_drv_info,
1496 						 notif_pcpu_work);
1497 
1498 	ffa_self_notif_handle(smp_processor_id(), true, info);
1499 }
1500 
1501 static const struct ffa_info_ops ffa_drv_info_ops = {
1502 	.api_version_get = ffa_api_version_get,
1503 	.partition_info_get = ffa_partition_info_get,
1504 };
1505 
1506 static const struct ffa_msg_ops ffa_drv_msg_ops = {
1507 	.mode_32bit_set = ffa_mode_32bit_set,
1508 	.sync_send_receive = ffa_sync_send_receive,
1509 	.indirect_send = ffa_indirect_msg_send,
1510 	.sync_send_receive2 = ffa_sync_send_receive2,
1511 };
1512 
1513 static const struct ffa_mem_ops ffa_drv_mem_ops = {
1514 	.memory_reclaim = ffa_memory_reclaim,
1515 	.memory_share = ffa_memory_share,
1516 	.memory_lend = ffa_memory_lend,
1517 };
1518 
1519 static const struct ffa_cpu_ops ffa_drv_cpu_ops = {
1520 	.run = ffa_run,
1521 };
1522 
1523 static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
1524 	.sched_recv_cb_register = ffa_sched_recv_cb_register,
1525 	.sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
1526 	.notify_request = ffa_notify_request,
1527 	.notify_relinquish = ffa_notify_relinquish,
1528 	.fwk_notify_request = ffa_fwk_notify_request,
1529 	.fwk_notify_relinquish = ffa_fwk_notify_relinquish,
1530 	.notify_send = ffa_notify_send,
1531 };
1532 
1533 static const struct ffa_ops ffa_drv_ops = {
1534 	.info_ops = &ffa_drv_info_ops,
1535 	.msg_ops = &ffa_drv_msg_ops,
1536 	.mem_ops = &ffa_drv_mem_ops,
1537 	.cpu_ops = &ffa_drv_cpu_ops,
1538 	.notifier_ops = &ffa_drv_notifier_ops,
1539 };
1540 
1541 void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
1542 {
1543 	int count, idx;
1544 	struct ffa_partition_info *pbuf, *tpbuf;
1545 
1546 	count = ffa_partition_probe(uuid, &pbuf);
1547 	if (count <= 0)
1548 		return;
1549 
1550 	for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++)
1551 		if (tpbuf->id == ffa_dev->vm_id)
1552 			uuid_copy(&ffa_dev->uuid, uuid);
1553 	kfree(pbuf);
1554 }
1555 
1556 static int
1557 ffa_bus_notifier(struct notifier_block *nb, unsigned long action, void *data)
1558 {
1559 	struct device *dev = data;
1560 	struct ffa_device *fdev = to_ffa_dev(dev);
1561 
1562 	if (action == BUS_NOTIFY_BIND_DRIVER) {
1563 		struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
1564 		const struct ffa_device_id *id_table = ffa_drv->id_table;
1565 
1566 		/*
1567 		 * FF-A v1.1 and above provides the UUID for each partition as part
1568 		 * of the discovery API, so the discovered UUID is already populated
1569 		 * in the device and there is no need for this workaround of copying
1570 		 * it from the driver's id_table (that is only needed for FF-A v1.0).
1571 		 */
1572 		if (uuid_is_null(&fdev->uuid))
1573 			ffa_device_match_uuid(fdev, &id_table->uuid);
1574 
1575 		return NOTIFY_OK;
1576 	}
1577 
1578 	return NOTIFY_DONE;
1579 }
1580 
1581 static struct notifier_block ffa_bus_nb = {
1582 	.notifier_call = ffa_bus_notifier,
1583 };
1584 
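/*
 * Track the ffa_device in the per-partition list stored in the
 * partition_info xarray, allocating and inserting the list head on the
 * first use of a given partition ID.
 */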
1585 static int ffa_xa_add_partition_info(struct ffa_device *dev)
1586 {
1587 	struct ffa_dev_part_info *info;
1588 	struct list_head *head, *phead;
1589 	int ret = -ENOMEM;
1590 
1591 	phead = xa_load(&drv_info->partition_info, dev->vm_id);
1592 	if (phead) {
1593 		head = phead;
1594 		list_for_each_entry(info, head, node) {
1595 			if (info->dev == dev) {
1596 				pr_err("%s: duplicate dev %p part ID 0x%x\n",
1597 				       __func__, dev, dev->vm_id);
1598 				return -EEXIST;
1599 			}
1600 		}
1601 	}
1602 
1603 	info = kzalloc(sizeof(*info), GFP_KERNEL);
1604 	if (!info)
1605 		return ret;
1606 
1607 	rwlock_init(&info->rw_lock);
1608 	info->dev = dev;
1609 
1610 	if (!phead) {
1611 		phead = kzalloc(sizeof(*phead), GFP_KERNEL);
1612 		if (!phead)
1613 			goto free_out;
1614 
1615 		INIT_LIST_HEAD(phead);
1616 
1617 		ret = xa_insert(&drv_info->partition_info, dev->vm_id, phead,
1618 				GFP_KERNEL);
1619 		if (ret) {
1620 			pr_err("%s: failed to save part ID 0x%x Ret:%d\n",
1621 			       __func__, dev->vm_id, ret);
1622 			goto free_out;
1623 		}
1624 	}
1625 	list_add(&info->node, phead);
1626 	return 0;
1627 
1628 free_out:
1629 	kfree(phead);
1630 	kfree(info);
1631 	return ret;
1632 }
1633 
1634 static int ffa_setup_host_partition(int vm_id)
1635 {
1636 	struct ffa_partition_info buf = { 0 };
1637 	struct ffa_device *ffa_dev;
1638 	int ret;
1639 
1640 	buf.id = vm_id;
1641 	ffa_dev = ffa_device_register(&buf, &ffa_drv_ops);
1642 	if (!ffa_dev) {
1643 		pr_err("%s: failed to register host partition ID 0x%x\n",
1644 		       __func__, vm_id);
1645 		return -EINVAL;
1646 	}
1647 
1648 	ret = ffa_xa_add_partition_info(ffa_dev);
1649 	if (ret)
1650 		return ret;
1651 
1652 	if (ffa_notifications_disabled())
1653 		return 0;
1654 
1655 	ret = ffa_sched_recv_cb_update(ffa_dev, ffa_self_notif_handle,
1656 				       drv_info, true);
1657 	if (ret)
1658 		pr_info("Failed to register driver sched callback %d\n", ret);
1659 
1660 	return ret;
1661 }
1662 
1663 static void ffa_partitions_cleanup(void)
1664 {
1665 	struct list_head *phead;
1666 	unsigned long idx;
1667 
1668 	/* Clean up/free all registered devices */
1669 	ffa_devices_unregister();
1670 
1671 	xa_for_each(&drv_info->partition_info, idx, phead) {
1672 		struct ffa_dev_part_info *info, *tmp;
1673 
1674 		xa_erase(&drv_info->partition_info, idx);
1675 		list_for_each_entry_safe(info, tmp, phead, node) {
1676 			list_del(&info->node);
1677 			kfree(info);
1678 		}
1679 		kfree(phead);
1680 	}
1681 
1682 	xa_destroy(&drv_info->partition_info);
1683 }
1684 
1685 static int ffa_setup_partitions(void)
1686 {
1687 	int count, idx, ret;
1688 	struct ffa_device *ffa_dev;
1689 	struct ffa_partition_info *pbuf, *tpbuf;
1690 
1691 	if (drv_info->version == FFA_VERSION_1_0) {
1692 		ret = bus_register_notifier(&ffa_bus_type, &ffa_bus_nb);
1693 		if (ret)
1694 			pr_err("Failed to register FF-A bus notifiers\n");
1695 	}
1696 
1697 	count = ffa_partition_probe(&uuid_null, &pbuf);
1698 	if (count <= 0) {
1699 		pr_info("%s: No partitions found, error %d\n", __func__, count);
1700 		return -EINVAL;
1701 	}
1702 
1703 	xa_init(&drv_info->partition_info);
1704 	for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
1705 		/* Note that if the UUID is uuid_null (FF-A v1.0), ffa_bus_notifier()
1706 		 * will have to find the UUID of this partition ID with the help of
1707 		 * ffa_device_match_uuid(). FF-A v1.1 and above provides the UUID
1708 		 * here for each partition as part of the discovery API, and that
1709 		 * UUID is passed on as is.
1710 		 */
1711 		ffa_dev = ffa_device_register(tpbuf, &ffa_drv_ops);
1712 		if (!ffa_dev) {
1713 			pr_err("%s: failed to register partition ID 0x%x\n",
1714 			       __func__, tpbuf->id);
1715 			continue;
1716 		}
1717 
1718 		if (drv_info->version > FFA_VERSION_1_0 &&
1719 		    !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
1720 			ffa_mode_32bit_set(ffa_dev);
1721 
1722 		if (ffa_xa_add_partition_info(ffa_dev)) {
1723 			ffa_device_unregister(ffa_dev);
1724 			continue;
1725 		}
1726 	}
1727 
1728 	kfree(pbuf);
1729 
1730 	/*
1731 	 * Check if the host is already added as part of the partition info.
1732 	 * Multiple UUIDs are not possible for the host, so just checking
1733 	 * whether an entry exists will suffice.
1734 	 */
1735 	if (xa_load(&drv_info->partition_info, drv_info->vm_id))
1736 		return 0;
1737 
1738 	/* Register an ffa_device for the host partition itself */
1739 	ret = ffa_setup_host_partition(drv_info->vm_id);
1740 	if (ret)
1741 		ffa_partitions_cleanup();
1742 
1743 	return ret;
1744 }
1745 
1746 /* FFA FEATURE IDs */
1747 #define FFA_FEAT_NOTIFICATION_PENDING_INT	(1)
1748 #define FFA_FEAT_SCHEDULE_RECEIVER_INT		(2)
1749 #define FFA_FEAT_MANAGED_EXIT_INT		(3)
1750 
1751 static irqreturn_t ffa_sched_recv_irq_handler(int irq, void *irq_data)
1752 {
1753 	struct ffa_pcpu_irq *pcpu = irq_data;
1754 	struct ffa_drv_info *info = pcpu->info;
1755 
1756 	queue_work(info->notif_pcpu_wq, &info->sched_recv_irq_work);
1757 
1758 	return IRQ_HANDLED;
1759 }
1760 
1761 static irqreturn_t notif_pend_irq_handler(int irq, void *irq_data)
1762 {
1763 	struct ffa_pcpu_irq *pcpu = irq_data;
1764 	struct ffa_drv_info *info = pcpu->info;
1765 
1766 	queue_work_on(smp_processor_id(), info->notif_pcpu_wq,
1767 		      &info->notif_pcpu_work);
1768 
1769 	return IRQ_HANDLED;
1770 }
1771 
1772 static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
1773 {
1774 	ffa_notification_info_get();
1775 }
1776 
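/*
 * Query the firmware for the interrupt ID assigned to the given feature
 * (notification pending or schedule receiver interrupt) and create a Linux
 * IRQ mapping for it, either through the device tree (GICv3 only) or ACPI.
 * Returns the Linux IRQ number on success or a negative error code.
 */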
1777 static int ffa_irq_map(u32 id)
1778 {
1779 	char *err_str;
1780 	int ret, irq, intid;
1781 
1782 	if (id == FFA_FEAT_NOTIFICATION_PENDING_INT)
1783 		err_str = "Notification Pending Interrupt";
1784 	else if (id == FFA_FEAT_SCHEDULE_RECEIVER_INT)
1785 		err_str = "Schedule Receiver Interrupt";
1786 	else
1787 		err_str = "Unknown ID";
1788 
1789 	/* The returned intid is assumed to be an SGI donated to the NS world */
1790 	ret = ffa_features(id, 0, &intid, NULL);
1791 	if (ret < 0) {
1792 		if (ret != -EOPNOTSUPP)
1793 			pr_err("Failed to retrieve FF-A %s %u\n", err_str, id);
1794 		return ret;
1795 	}
1796 
1797 	if (acpi_disabled) {
1798 		struct of_phandle_args oirq = {};
1799 		struct device_node *gic;
1800 
1801 		/* Only GICv3 is currently supported with the device tree */
1802 		gic = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
1803 		if (!gic)
1804 			return -ENXIO;
1805 
1806 		oirq.np = gic;
1807 		oirq.args_count = 1;
1808 		oirq.args[0] = intid;
1809 		irq = irq_create_of_mapping(&oirq);
1810 		of_node_put(gic);
1811 #ifdef CONFIG_ACPI
1812 	} else {
1813 		irq = acpi_register_gsi(NULL, intid, ACPI_EDGE_SENSITIVE,
1814 					ACPI_ACTIVE_HIGH);
1815 #endif
1816 	}
1817 
1818 	if (irq <= 0) {
1819 		pr_err("Failed to create IRQ mapping!\n");
1820 		return -ENODATA;
1821 	}
1822 
1823 	return irq;
1824 }
1825 
1826 static void ffa_irq_unmap(unsigned int irq)
1827 {
1828 	if (!irq)
1829 		return;
1830 	irq_dispose_mapping(irq);
1831 }
1832 
1833 static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
1834 {
1835 	if (drv_info->sched_recv_irq)
1836 		enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
1837 	if (drv_info->notif_pend_irq)
1838 		enable_percpu_irq(drv_info->notif_pend_irq, IRQ_TYPE_NONE);
1839 	return 0;
1840 }
1841 
1842 static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
1843 {
1844 	if (drv_info->sched_recv_irq)
1845 		disable_percpu_irq(drv_info->sched_recv_irq);
1846 	if (drv_info->notif_pend_irq)
1847 		disable_percpu_irq(drv_info->notif_pend_irq);
1848 	return 0;
1849 }
1850 
1851 static void ffa_uninit_pcpu_irq(void)
1852 {
1853 	if (drv_info->cpuhp_state) {
1854 		cpuhp_remove_state(drv_info->cpuhp_state);
1855 		drv_info->cpuhp_state = 0;
1856 	}
1857 
1858 	if (drv_info->notif_pcpu_wq) {
1859 		destroy_workqueue(drv_info->notif_pcpu_wq);
1860 		drv_info->notif_pcpu_wq = NULL;
1861 	}
1862 
1863 	if (drv_info->sched_recv_irq)
1864 		free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);
1865 
1866 	if (drv_info->notif_pend_irq)
1867 		free_percpu_irq(drv_info->notif_pend_irq, drv_info->irq_pcpu);
1868 
1869 	if (drv_info->irq_pcpu) {
1870 		free_percpu(drv_info->irq_pcpu);
1871 		drv_info->irq_pcpu = NULL;
1872 	}
1873 }
1874 
1875 static int ffa_init_pcpu_irq(void)
1876 {
1877 	struct ffa_pcpu_irq __percpu *irq_pcpu;
1878 	int ret, cpu;
1879 
1880 	irq_pcpu = alloc_percpu(struct ffa_pcpu_irq);
1881 	if (!irq_pcpu)
1882 		return -ENOMEM;
1883 
1884 	for_each_present_cpu(cpu)
1885 		per_cpu_ptr(irq_pcpu, cpu)->info = drv_info;
1886 
1887 	drv_info->irq_pcpu = irq_pcpu;
1888 
1889 	if (drv_info->sched_recv_irq) {
1890 		ret = request_percpu_irq(drv_info->sched_recv_irq,
1891 					 ffa_sched_recv_irq_handler,
1892 					 "ARM-FFA-SRI", irq_pcpu);
1893 		if (ret) {
1894 			pr_err("Error registering percpu SRI nIRQ %d : %d\n",
1895 			       drv_info->sched_recv_irq, ret);
1896 			drv_info->sched_recv_irq = 0;
1897 			return ret;
1898 		}
1899 	}
1900 
1901 	if (drv_info->notif_pend_irq) {
1902 		ret = request_percpu_irq(drv_info->notif_pend_irq,
1903 					 notif_pend_irq_handler,
1904 					 "ARM-FFA-NPI", irq_pcpu);
1905 		if (ret) {
1906 			pr_err("Error registering percpu NPI nIRQ %d : %d\n",
1907 			       drv_info->notif_pend_irq, ret);
1908 			drv_info->notif_pend_irq = 0;
1909 			return ret;
1910 		}
1911 	}
1912 
1913 	INIT_WORK(&drv_info->sched_recv_irq_work, ffa_sched_recv_irq_work_fn);
1914 	INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn);
1915 	drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification");
1916 	if (!drv_info->notif_pcpu_wq)
1917 		return -EINVAL;
1918 
1919 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ffa/pcpu-irq:starting",
1920 				ffa_cpuhp_pcpu_irq_enable,
1921 				ffa_cpuhp_pcpu_irq_disable);
1922 
1923 	if (ret < 0)
1924 		return ret;
1925 
1926 	drv_info->cpuhp_state = ret;
1927 	return 0;
1928 }
1929 
1930 static void ffa_notifications_cleanup(void)
1931 {
1932 	ffa_uninit_pcpu_irq();
1933 	ffa_irq_unmap(drv_info->sched_recv_irq);
1934 	drv_info->sched_recv_irq = 0;
1935 	ffa_irq_unmap(drv_info->notif_pend_irq);
1936 	drv_info->notif_pend_irq = 0;
1937 
1938 	if (drv_info->bitmap_created) {
1939 		ffa_notification_bitmap_destroy();
1940 		drv_info->bitmap_created = false;
1941 	}
1942 	drv_info->notif_enabled = false;
1943 }
1944 
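/*
 * Probe and set up notification support: create the per-VM notification
 * bitmap if the firmware implements it, map and request the SRI and/or NPI
 * per-CPU interrupts, and mark notifications as enabled only if at least one
 * of the two interrupts is usable. Failure here is not fatal: the driver
 * simply continues with notifications disabled.
 */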
1945 static void ffa_notifications_setup(void)
1946 {
1947 	int ret;
1948 
1949 	ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
1950 	if (!ret) {
1951 		ret = ffa_notification_bitmap_create();
1952 		if (ret) {
1953 			pr_err("Notification bitmap create error %d\n", ret);
1954 			return;
1955 		}
1956 
1957 		drv_info->bitmap_created = true;
1958 	}
1959 
1960 	ret = ffa_irq_map(FFA_FEAT_SCHEDULE_RECEIVER_INT);
1961 	if (ret > 0)
1962 		drv_info->sched_recv_irq = ret;
1963 
1964 	ret = ffa_irq_map(FFA_FEAT_NOTIFICATION_PENDING_INT);
1965 	if (ret > 0)
1966 		drv_info->notif_pend_irq = ret;
1967 
1968 	if (!drv_info->sched_recv_irq && !drv_info->notif_pend_irq)
1969 		goto cleanup;
1970 
1971 	ret = ffa_init_pcpu_irq();
1972 	if (ret)
1973 		goto cleanup;
1974 
1975 	hash_init(drv_info->notifier_hash);
1976 	mutex_init(&drv_info->notify_lock);
1977 
1978 	drv_info->notif_enabled = true;
1979 	return;
1980 cleanup:
1981 	pr_info("Notification setup failed %d, not enabled\n", ret);
1982 	ffa_notifications_cleanup();
1983 }
1984 
1985 static int __init ffa_init(void)
1986 {
1987 	int ret;
1988 	u32 buf_sz;
1989 	size_t rxtx_bufsz = SZ_4K;
1990 
1991 	ret = ffa_transport_init(&invoke_ffa_fn);
1992 	if (ret)
1993 		return ret;
1994 
1995 	drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
1996 	if (!drv_info)
1997 		return -ENOMEM;
1998 
1999 	ret = ffa_version_check(&drv_info->version);
2000 	if (ret)
2001 		goto free_drv_info;
2002 
2003 	if (ffa_id_get(&drv_info->vm_id)) {
2004 		pr_err("failed to obtain VM id for self\n");
2005 		ret = -ENODEV;
2006 		goto free_drv_info;
2007 	}
2008 
2009 	ret = ffa_features(FFA_FN_NATIVE(RXTX_MAP), 0, &buf_sz, NULL);
2010 	if (!ret) {
2011 		if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 1)
2012 			rxtx_bufsz = SZ_64K;
2013 		else if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 2)
2014 			rxtx_bufsz = SZ_16K;
2015 		else
2016 			rxtx_bufsz = SZ_4K;
2017 	}
2018 
2019 	drv_info->rxtx_bufsz = rxtx_bufsz;
2020 	drv_info->rx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
2021 	if (!drv_info->rx_buffer) {
2022 		ret = -ENOMEM;
2023 		goto free_pages;
2024 	}
2025 
2026 	drv_info->tx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
2027 	if (!drv_info->tx_buffer) {
2028 		ret = -ENOMEM;
2029 		goto free_pages;
2030 	}
2031 
2032 	ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
2033 			   virt_to_phys(drv_info->rx_buffer),
2034 			   rxtx_bufsz / FFA_PAGE_SIZE);
2035 	if (ret) {
2036 		pr_err("failed to register FFA RxTx buffers\n");
2037 		goto free_pages;
2038 	}
2039 
2040 	mutex_init(&drv_info->rx_lock);
2041 	mutex_init(&drv_info->tx_lock);
2042 
2043 	ffa_drvinfo_flags_init();
2044 
2045 	ffa_notifications_setup();
2046 
2047 	ret = ffa_setup_partitions();
2048 	if (!ret)
2049 		return ret;
2050 
2051 	pr_err("failed to setup partitions\n");
2052 	ffa_notifications_cleanup();
2053 free_pages:
2054 	if (drv_info->tx_buffer)
2055 		free_pages_exact(drv_info->tx_buffer, rxtx_bufsz);
2056 	free_pages_exact(drv_info->rx_buffer, rxtx_bufsz);
2057 free_drv_info:
2058 	kfree(drv_info);
2059 	return ret;
2060 }
2061 module_init(ffa_init);
2062 
2063 static void __exit ffa_exit(void)
2064 {
2065 	ffa_notifications_cleanup();
2066 	ffa_partitions_cleanup();
2067 	ffa_rxtx_unmap(drv_info->vm_id);
2068 	free_pages_exact(drv_info->tx_buffer, drv_info->rxtx_bufsz);
2069 	free_pages_exact(drv_info->rx_buffer, drv_info->rxtx_bufsz);
2070 	kfree(drv_info);
2071 }
2072 module_exit(ffa_exit);
2073 
2074 MODULE_ALIAS("arm-ffa");
2075 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
2076 MODULE_DESCRIPTION("Arm FF-A interface driver");
2077 MODULE_LICENSE("GPL v2");
2078