xref: /linux/arch/s390/kernel/ctlreg.c (revision bc46b7cbc58c4cb562b6a45a1fbc7b8e7b23df58)
// SPDX-License-Identifier: GPL-2.0
/*
 *	Copyright IBM Corp. 1999, 2023
 */

#include <linux/irqflags.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cache.h>
#include <asm/abs_lowcore.h>
#include <asm/ctlreg.h>

/*
 * system_ctl_lock guards access to global control register contents
 * which are kept in the control register save area within absolute
 * lowcore at physical address zero.
 */
static DEFINE_SPINLOCK(system_ctl_lock);

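/*
 * Typical update sequence (see system_ctlreg_modify() below): take
 * system_ctlreg_lock(), map the absolute lowcore with get_abs_lowcore(),
 * update cregs_save_area, put_abs_lowcore(), propagate the change to all
 * CPUs, then system_ctlreg_unlock().
 */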
void system_ctlreg_lock(void)
	__acquires(&system_ctl_lock)
{
	spin_lock(&system_ctl_lock);
}

void system_ctlreg_unlock(void)
	__releases(&system_ctl_lock)
{
	spin_unlock(&system_ctl_lock);
}

static bool system_ctlreg_area_init __ro_after_init;

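/*
 * Initialize the control register save areas: store the current contents
 * of control registers 0-15 into both the passed lowcore and the absolute
 * lowcore, and mark the save area as usable for system_ctlreg_modify().
 */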
void __init system_ctlreg_init_save_area(struct lowcore *lc)
{
	struct lowcore *abs_lc;

	abs_lc = get_abs_lowcore();
	__local_ctl_store(0, 15, lc->cregs_save_area);
	__local_ctl_store(0, 15, abs_lc->cregs_save_area);
	put_abs_lowcore(abs_lc);
	system_ctlreg_area_init = true;
}

struct ctlreg_parms {
	unsigned long andval;
	unsigned long orval;
	unsigned long val;
	int request;
	int cr;
};

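/*
 * Apply a control register update on the local CPU: store control
 * registers 0-15, then either load the new value (CTLREG_LOAD) or apply
 * the and/or masks to the requested register, and reload all registers.
 */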
static void ctlreg_callback(void *info)
{
	struct ctlreg_parms *pp = info;
	struct ctlreg regs[16];

	__local_ctl_store(0, 15, regs);
	if (pp->request == CTLREG_LOAD) {
		regs[pp->cr].val = pp->val;
	} else {
		regs[pp->cr].val &= pp->andval;
		regs[pp->cr].val |= pp->orval;
	}
	__local_ctl_load(0, 15, regs);
}

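/*
 * Run ctlreg_callback() on all CPUs. For very early calls, when not
 * everything is set up yet, run it only on the current CPU with
 * interrupts disabled.
 */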
static void system_ctlreg_update(void *info)
{
	unsigned long flags;

	if (system_state == SYSTEM_BOOTING) {
		/*
		 * For very early calls do not call on_each_cpu()
		 * since not everything might be setup.
		 */
		local_irq_save(flags);
		ctlreg_callback(info);
		local_irq_restore(flags);
	} else {
		on_each_cpu(ctlreg_callback, info, 1);
	}
}

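/*
 * Modify a control register system-wide: translate the request into
 * and/or masks or a full load value, update the control register save
 * area in the absolute lowcore under system_ctl_lock (once the save area
 * has been initialized), and propagate the change to all CPUs.
 */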
void system_ctlreg_modify(unsigned int cr, unsigned long data, int request)
{
	struct ctlreg_parms pp = { .cr = cr, .request = request, };
	struct lowcore *abs_lc;

	switch (request) {
	case CTLREG_SET_BIT:
		pp.orval  = 1UL << data;
		pp.andval = -1UL;
		break;
	case CTLREG_CLEAR_BIT:
		pp.orval  = 0;
		pp.andval = ~(1UL << data);
		break;
	case CTLREG_LOAD:
		pp.val = data;
		break;
	}
	if (system_ctlreg_area_init) {
		system_ctlreg_lock();
		abs_lc = get_abs_lowcore();
		if (request == CTLREG_LOAD) {
			abs_lc->cregs_save_area[cr].val = pp.val;
		} else {
			abs_lc->cregs_save_area[cr].val &= pp.andval;
			abs_lc->cregs_save_area[cr].val |= pp.orval;
		}
		put_abs_lowcore(abs_lc);
		system_ctlreg_update(&pp);
		system_ctlreg_unlock();
	} else {
		system_ctlreg_update(&pp);
	}
}
EXPORT_SYMBOL(system_ctlreg_modify);
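/*
 * Usage sketch (illustrative values, not taken from an in-tree caller):
 *
 *	system_ctlreg_modify(0, 17, CTLREG_SET_BIT);
 *
 * would set bit 17 in control register 0 on all online CPUs and record
 * the change in the control register save area.
 */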