xref: /kvm-unit-tests/lib/s390x/malloc_io.c (revision b0fe398862052fc54ddb235d9ca2113901f6a56f)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * I/O page allocation
 *
 * Copyright (c) 2021 IBM Corp
 *
 * Authors:
 *  Pierre Morel <pmorel@linux.ibm.com>
 *
 * Using this interface gives the host access to the allocated pages in
 * case the guest is a protected guest.
 * This is needed for I/O buffers.
 */
#include <libcflat.h>
#include <asm/page.h>
#include <asm/uv.h>
#include <malloc_io.h>
#include <alloc_page.h>
#include <asm/facility.h>
#include <bitops.h>

/*
 * Share @count pages starting at @p with the host, one Ultravisor call
 * per page.  Returns the number of pages actually shared, which is less
 * than @count if a share request failed.
 */
static int share_pages(void *p, int count)
{
	int i;

	for (i = 0; i < count; i++, p += PAGE_SIZE)
		if (uv_set_shared((unsigned long)p))
			break;
	return i;
}

/* Remove the shared state from @count pages starting at @p. */
static void unshare_pages(void *p, int count)
{
	int i;

	for (i = count; i > 0; i--, p += PAGE_SIZE)
		uv_remove_shared((unsigned long)p);
}

/*
 * Allocate pages for an I/O buffer of @size bytes from the DMA31 area
 * (31-bit addressable memory).  When the Ultravisor-call facility (158)
 * is installed, the pages are also shared with the host so they remain
 * accessible for I/O from a protected guest.
 * Returns NULL if the allocation or the sharing fails.
 */
void *alloc_io_mem(int size, int flags)
{
	/* Round up to whole pages before computing the allocation order */
	int order = get_order((size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	void *p;
	int n;

	assert(size);

	p = alloc_pages_flags(order, AREA_DMA31 | flags);
	if (!p || !test_facility(158))
		return p;

	n = share_pages(p, 1 << order);
	if (n == 1 << order)
		return p;

	/* Sharing failed part way through: roll back and free the pages */
	unshare_pages(p, n);
	free_pages(p);
	return NULL;
}

/*
 * Free an I/O buffer previously returned by alloc_io_mem().  @size must
 * match the size used for the allocation so the same number of pages is
 * unshared before the memory is released.
 */
void free_io_mem(void *p, int size)
{
	int order = get_order((size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	assert(IS_ALIGNED((uintptr_t)p, PAGE_SIZE));

	if (test_facility(158))
		unshare_pages(p, 1 << order);
	free_pages(p);
}
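
/*
 * Usage sketch: a minimal, hypothetical example of how a test might pair
 * alloc_io_mem() and free_io_mem() for a host-accessible I/O buffer.
 * The helper name, the one-page size and the zero allocation flags are
 * assumptions made for illustration; real callers pass whatever size and
 * alloc_page flags they need.  Guarded by #if 0 so it is not built.
 */
#if 0
static void example_io_buffer(void)
{
	/* One page from the DMA31 area; shared with the host when the
	 * Ultravisor-call facility is installed. */
	void *buf = alloc_io_mem(PAGE_SIZE, 0);

	if (!buf)
		return;

	/* ... use buf as an I/O buffer, e.g. for channel I/O ... */

	/* Unshare (if shared) and return the page to the allocator. */
	free_io_mem(buf, PAGE_SIZE);
}
#endif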