// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq_work.h>
#include <linux/profile.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
#include <linux/tracepoint.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>

#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/loongson.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/paravirt.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/time.h>

int __cpu_number_map[NR_CPUS];   /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Representing the threads (siblings) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* Representing the last level cache shared map of each logical CPU */
cpumask_t cpu_llc_shared_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_llc_shared_map);

/* Representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which llc shared maps can be computed */
static cpumask_t cpu_llc_shared_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

struct secondary_data cpuboot_data;
static DEFINE_PER_CPU(int, cpu_state);

static const char *ipi_types[NR_IPI] __tracepoint_string = {
	[IPI_RESCHEDULE] = "Rescheduling interrupts",
	[IPI_CALL_FUNCTION] = "Function call interrupts",
	[IPI_IRQ_WORK] = "IRQ work interrupts",
	[IPI_CLEAR_VECTOR] = "Clear vector interrupts",
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, cpu).ipi_irqs[i], 10);
		seq_printf(p, " LoongArch  %d  %s\n", i + 1, ipi_types[i]);
	}
}

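/* Add @cpu to the core map of every already-registered CPU in the same physical package */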
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

static inline void set_cpu_llc_shared_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_llc_shared_setup_map);

	for_each_cpu(i, &cpu_llc_shared_setup_map) {
		if (cpu_to_node(cpu) == cpu_to_node(i)) {
			cpumask_set_cpu(i, &cpu_llc_shared_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_llc_shared_map[i]);
		}
	}
}

static inline void clear_cpu_llc_shared_map(int cpu)
{
	int i;

	for_each_cpu(i, &cpu_llc_shared_setup_map) {
		if (cpu_to_node(cpu) == cpu_to_node(i)) {
			cpumask_clear_cpu(i, &cpu_llc_shared_map[cpu]);
			cpumask_clear_cpu(cpu, &cpu_llc_shared_map[i]);
		}
	}

	cpumask_clear_cpu(cpu, &cpu_llc_shared_setup_map);
}

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	for_each_cpu(i, &cpu_sibling_setup_map) {
		if (cpus_are_siblings(cpu, i)) {
			cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
		}
	}
}

static inline void clear_cpu_sibling_map(int cpu)
{
	int i;

	for_each_cpu(i, &cpu_sibling_setup_map) {
		if (cpus_are_siblings(cpu, i)) {
			cpumask_clear_cpu(i, &cpu_sibling_map[cpu]);
			cpumask_clear_cpu(cpu, &cpu_sibling_map[i]);
		}
	}

	cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

/* Send mailbox buffer via Mail_Send */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
{
	uint64_t val;

	/* Send high 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data & IOCSR_MBUF_SEND_H32_MASK);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);

	/* Send low 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
}

static u32 ipi_read_clear(int cpu)
{
	u32 action;

	/* Load the ipi register to figure out what we're supposed to do */
	action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
	/* Clear the ipi register to clear the interrupt */
	iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);
	wbflush();

	return action;
}

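/* Encode the action vector and target CPU into the IPI send IOCSR */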
static void ipi_write_action(int cpu, u32 action)
{
	uint32_t val;

	val = IOCSR_IPI_SEND_BLOCKING | action;
	val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
	iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
}

static void loongson_send_ipi_single(int cpu, unsigned int action)
{
	ipi_write_action(cpu_logical_map(cpu), (u32)action);
}

static void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		ipi_write_action(cpu_logical_map(i), (u32)action);
}

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void arch_smp_send_reschedule(int cpu)
{
	mp_ops.send_ipi_single(cpu, ACTION_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	mp_ops.send_ipi_single(smp_processor_id(), ACTION_IRQ_WORK);
}
#endif

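/*
 * IPI handler: read and clear the pending action bits, then dispatch
 * reschedule, function-call, irq-work and clear-vector requests.
 */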
static irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
{
	unsigned int action;
	unsigned int cpu = smp_processor_id();

	action = ipi_read_clear(cpu_logical_map(cpu));

	if (action & SMP_RESCHEDULE) {
		scheduler_ipi();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
	}

	if (action & SMP_CALL_FUNCTION) {
		generic_smp_call_function_interrupt();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
	}

	if (action & SMP_IRQ_WORK) {
		irq_work_run();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_IRQ_WORK]++;
	}

	if (action & SMP_CLEAR_VECTOR) {
		complete_irq_moving();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_CLEAR_VECTOR]++;
	}

	return IRQ_HANDLED;
}

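/* Map and request the per-CPU IPI interrupt, with loongson_ipi_interrupt() as its handler */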
static void loongson_init_ipi(void)
{
	int r, ipi_irq;

	ipi_irq = get_percpu_irq(INT_IPI);
	if (ipi_irq < 0)
		panic("IPI IRQ mapping failed\n");

	irq_set_percpu_devid(ipi_irq);
	r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &irq_stat);
	if (r < 0)
		panic("IPI IRQ request failed\n");
}

struct smp_ops mp_ops = {
	.init_ipi		= loongson_init_ipi,
	.send_ipi_single	= loongson_send_ipi_single,
	.send_ipi_mask		= loongson_send_ipi_mask,
};

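/*
 * Enumerate CPUs from the devicetree (when CONFIG_OF is enabled) and
 * set up the logical/physical CPU maps; the boot CPU is always CPU 0.
 */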
static void __init fdt_smp_setup(void)
{
#ifdef CONFIG_OF
	unsigned int cpu, cpuid;
	struct device_node *node = NULL;

	for_each_of_cpu_node(node) {
		if (!of_device_is_available(node))
			continue;

		cpuid = of_get_cpu_hwid(node, 0);
		if (cpuid >= nr_cpu_ids)
			continue;

		if (cpuid == loongson_sysconf.boot_cpu_id)
			cpu = 0;
		else
			cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);

		num_processors++;
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
		__cpu_number_map[cpuid] = cpu;
		__cpu_logical_map[cpu] = cpuid;

		early_numa_add_cpu(cpuid, 0);
		set_cpuid_to_node(cpuid, 0);
	}

	loongson_sysconf.nr_cpus = num_processors;
	set_bit(0, loongson_sysconf.cores_io_master);
#endif
}

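/* Early SMP setup: enumerate CPUs, derive the boot CPU topology and enable IPIs */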
void __init loongson_smp_setup(void)
{
	fdt_smp_setup();

	if (loongson_sysconf.cores_per_package == 0)
		loongson_sysconf.cores_per_package = num_processors;

	cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
	cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;

	pv_ipi_init();
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
	pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
}

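/* Mark all configured CPUs present and clear their boot mailboxes */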
void __init loongson_prepare_cpus(unsigned int max_cpus)
{
	int i = 0;

	parse_acpi_topology();
	cpu_data[0].global_id = cpu_logical_map(0);

	for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
		set_cpu_present(i, true);
		csr_mail_send(0, __cpu_logical_map[i], 0);
	}

	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}

/*
 * Set up the PC, SP, and TP of a secondary processor and start it running!
 */
void loongson_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned long entry;

	pr_info("Booting CPU#%d...\n", cpu);

	entry = __pa_symbol((unsigned long)&smpboot_entry);
	cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
	cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);

	csr_mail_send(entry, cpu_logical_map(cpu), 0);

	loongson_send_ipi_single(cpu, ACTION_BOOT_CPU);
}

/*
 * SMP init and finish on secondary CPUs
 */
void loongson_init_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
			     ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER | ECFGF_SIP0;

	change_csr_ecfg(ECFG0_IM, imask);

	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);

#ifdef CONFIG_NUMA
	numa_add_cpu(cpu);
#endif
	per_cpu(cpu_state, cpu) = CPU_ONLINE;
	cpu_data[cpu].package =
		     cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
	cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
		     cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
	cpu_data[cpu].global_id = cpu_logical_map(cpu);
}

void loongson_smp_finish(void)
{
	local_irq_enable();
	iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
	pr_info("CPU#%d finished\n", smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU

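/*
 * Take the current CPU offline: remove it from the topology maps,
 * migrate its interrupts away and flush its TLB. Refused for CPUs
 * that serve as I/O masters.
 */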
int loongson_cpu_disable(void)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (io_master(cpu))
		return -EBUSY;

#ifdef CONFIG_NUMA
	numa_remove_cpu(cpu);
#endif
	set_cpu_online(cpu, false);
	clear_cpu_sibling_map(cpu);
	clear_cpu_llc_shared_map(cpu);
	calculate_cpu_foreign_map();
	local_irq_save(flags);
	irq_migrate_all_off_this_cpu();
	clear_csr_ecfg(ECFG0_IM);
	local_irq_restore(flags);
	local_flush_tlb_all();

	return 0;
}

void loongson_cpu_die(unsigned int cpu)
{
	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	mb();
}

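/*
 * Park an offlined CPU: wait in the idle state until a new kernel
 * entry address is delivered through mailbox 0, then jump to it.
 */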
static void __noreturn idle_play_dead(void)
{
	register uint64_t addr;
	register void (*init_fn)(void);

	idle_task_exit();
	local_irq_enable();
	set_csr_ecfg(ECFGF_IPI);
	__this_cpu_write(cpu_state, CPU_DEAD);

	__smp_mb();
	do {
		__asm__ __volatile__("idle 0\n\t");
		addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
	} while (addr == 0);

	local_irq_disable();
	init_fn = (void *)TO_CACHE(addr);
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);

	init_fn();
	BUG();
}

#ifdef CONFIG_HIBERNATION
static void __noreturn poll_play_dead(void)
{
	register uint64_t addr;
	register void (*init_fn)(void);

	idle_task_exit();
	__this_cpu_write(cpu_state, CPU_DEAD);

	__smp_mb();
	do {
		__asm__ __volatile__("nop\n\t");
		addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
	} while (addr == 0);

	init_fn = (void *)TO_CACHE(addr);
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);

	init_fn();
	BUG();
}
#endif

static void (*play_dead)(void) = idle_play_dead;

void __noreturn arch_cpu_idle_dead(void)
{
	play_dead();
	BUG(); /* play_dead() doesn't return */
}

#ifdef CONFIG_HIBERNATION
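/*
 * For hibernation resume, offline the nonboot CPUs using the polling
 * variant of play_dead, which spins on the boot mailbox instead of
 * executing idle and waiting for an IPI wakeup.
 */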
int hibernate_resume_nonboot_cpu_disable(void)
{
	int ret;

	play_dead = poll_play_dead;
	ret = suspend_disable_secondary_cpus();
	play_dead = idle_play_dead;

	return ret;
}
#endif

#endif

/*
 * Power management
 */
#ifdef CONFIG_PM

static int loongson_ipi_suspend(void)
{
	return 0;
}

static void loongson_ipi_resume(void)
{
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
}

static struct syscore_ops loongson_ipi_syscore_ops = {
	.resume         = loongson_ipi_resume,
	.suspend        = loongson_ipi_suspend,
};

/*
 * Enable boot cpu ipi before enabling nonboot cpus
 * during syscore_resume.
 */
static int __init ipi_pm_init(void)
{
	register_syscore_ops(&loongson_ipi_syscore_ops);
	return 0;
}

core_initcall(ipi_pm_init);
#endif

/* Preload SMP state for boot cpu */
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu, node, rr_node;

	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	set_my_cpu_offset(per_cpu_offset(0));
	numa_add_cpu(0);

	rr_node = first_node(node_online_map);
	for_each_possible_cpu(cpu) {
		node = early_cpu_to_node(cpu);

		/*
		 * The mapping between present cpus and nodes has been
		 * built during MADT and SRAT parsing.
		 *
		 * If possible cpus = present cpus here, early_cpu_to_node
		 * will return a valid node.
		 *
		 * If possible cpus > present cpus here (e.g. some possible
		 * cpus will be added by cpu-hotplug later), for possible but
		 * not present cpus, early_cpu_to_node will return NUMA_NO_NODE,
		 * and we just map them to online nodes in a round-robin way.
		 * Once hotplugged, a new correct mapping will be built for them.
		 */
		if (node != NUMA_NO_NODE)
			set_cpu_numa_node(cpu, node);
		else {
			set_cpu_numa_node(cpu, rr_node);
			rr_node = next_node_in(rr_node, node_online_map);
		}
	}

	pv_spinlock_init();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	loongson_prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_llc_shared_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	loongson_boot_secondary(cpu, tidle);

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(5000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);

	return 0;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	sync_counter();
	cpu = raw_smp_processor_id();
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_probe();
	constant_clockevent_init();
	loongson_init_secondary();

	set_cpu_sibling_map(cpu);
	set_cpu_llc_shared_map(cpu);
	set_cpu_core_map(cpu);

	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting */
	complete(&cpu_starting);

	/* The CPU is running, now mark it online */
	set_cpu_online(cpu, true);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up()
	 */
	complete(&cpu_running);

	/*
	 * IRQs will be enabled in loongson_smp_finish(); enabling them too
	 * early is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	loongson_smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static void stop_this_cpu(void *dummy)
{
	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (true);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

#ifdef CONFIG_PROFILING
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#endif

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

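/*
 * If the mm is in use elsewhere, broadcast the flush to every CPU in
 * its cpumask; otherwise flush locally and invalidate the stale
 * cpu_context of the other CPUs so they allocate a fresh one when the
 * mm is next activated there.
 */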
void flush_tlb_mm(struct mm_struct *mm)
{
	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
		local_flush_tlb_mm(mm);
	}

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

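/* Range flush: same broadcast-or-local strategy as flush_tlb_mm() above */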
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
}
EXPORT_SYMBOL(flush_tlb_one);