/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#ifndef __MIPS_ASM_MIPS_CPS_H__
# error Please include asm/mips-cps.h rather than asm/mips-cm.h
#endif

#ifndef __MIPS_ASM_MIPS_CM_H__
#define __MIPS_ASM_MIPS_CM_H__

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/errno.h>

/* The base address of the CM GCR block */
extern void __iomem *mips_gcr_base;

/* The base address of the CM L2-only sync region */
extern void __iomem *mips_cm_l2sync_base;

/**
 * mips_cm_phys_base - retrieve the physical base address of the CM
 *
 * This function returns the physical base address of the Coherence Manager
 * global control block, or 0 if no Coherence Manager is present. It provides
 * a default implementation which reads the CMGCRBase register where available,
 * and may be overridden by platforms which determine this address in a
 * different way by defining a function with the same prototype.
 */
extern phys_addr_t mips_cm_phys_base(void);
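
/*
 * As the comment above notes, a platform may override the default by
 * providing its own definition with the same prototype. A minimal,
 * purely illustrative sketch (the address below is hypothetical, not
 * taken from any real board), compiled out here:
 */
#if 0
phys_addr_t mips_cm_phys_base(void)
{
	/* Return the CM GCR physical base fixed by the board design. */
	return 0x1fbf8000;
}
#endif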

/**
 * mips_cm_l2sync_phys_base - retrieve the physical base address of the CM
 *                            L2-sync region
 *
 * This function returns the physical base address of the Coherence Manager
 * L2-cache only region. It provides a default implementation which reads the
 * CMGCRL2OnlySyncBase register where available, or falls back to a 4K region
 * placed immediately after the CM GCR block. It may be overridden by platforms
 * which determine this address in a different way by defining a function with
 * the same prototype.
 */
extern phys_addr_t mips_cm_l2sync_phys_base(void);
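
/*
 * For illustration only: the fallback described above amounts to placing the
 * L2-only sync region directly after the 32K block of GCRs. A sketch of that
 * default, assuming the size constants defined later in this header:
 */
#if 0
phys_addr_t mips_cm_l2sync_phys_base(void)
{
	/* 4K sync region immediately following the GCR block */
	return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
}
#endif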

/*
 * mips_cm_is64 - determine CM register width
 *
 * The CM register width is determined by the version of the CM, with CM3
 * introducing 64 bit GCRs and all prior CM versions having 32 bit GCRs.
 * However, we may run a kernel built for MIPS32 on a system with 64 bit GCRs,
 * or vice-versa. This variable indicates the width of the memory accesses
 * that the kernel will perform to GCRs, which may differ from the actual
 * width of the GCRs.
 *
 * It's set to 0 for 32-bit accesses and 1 for 64-bit accesses.
 */
extern int mips_cm_is64;
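
/*
 * The GCR accessors generated via asm/mips-cps.h consult this variable to
 * decide how to touch 64-bit registers. A hedged sketch of the idea only,
 * not the actual accessor implementation, assuming a build where 64-bit
 * MMIO helpers are available:
 */
#if 0
static u64 example_read_gcr64(void __iomem *addr)
{
	if (mips_cm_is64)
		return __raw_readq(addr);	/* single 64-bit access */

	/* otherwise fall back to a pair of 32-bit accesses */
	return __raw_readl(addr) |
	       ((u64)__raw_readl(addr + 0x4) << 32);
}
#endif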

/*
 * mips_cm_is_l2_hci_broken - determine if HCI is broken
 *
 * Some CM reports show that Hardware Cache Initialization is
 * complete, but in reality it's not the case. They also incorrectly
 * indicate that Hardware Cache Initialization is supported. This
 * flag allows warning about this broken feature.
 */
extern bool mips_cm_is_l2_hci_broken;

/**
 * mips_cm_error_report - Report CM cache errors
 */
#ifdef CONFIG_MIPS_CM
extern void mips_cm_error_report(void);
#else
static inline void mips_cm_error_report(void) {}
#endif

/**
 * mips_cm_probe - probe for a Coherence Manager
 *
 * Attempt to detect the presence of a Coherence Manager. Returns 0 if a CM
 * is successfully detected, else -errno.
 */
#ifdef CONFIG_MIPS_CM
extern int mips_cm_probe(void);
#else
static inline int mips_cm_probe(void)
{
	return -ENODEV;
}
#endif
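
/*
 * A rough usage sketch: probe once during platform bring-up, then gate later
 * CM users on mips_cm_present(). The setup function name is hypothetical;
 * the real call sites live in the MIPS CPS platform code.
 */
#if 0
static void example_platform_setup(void)
{
	if (mips_cm_probe())
		return;		/* no CM: -errno, GCR accessors unusable */

	if (mips_cm_present())
		pr_info("CM revision 0x%x\n", mips_cm_revision());
}
#endif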

/**
 * mips_cm_present - determine whether a Coherence Manager is present
 *
 * Returns true if a CM is present in the system, else false.
 */
static inline bool mips_cm_present(void)
{
#ifdef CONFIG_MIPS_CM
	return mips_gcr_base != NULL;
#else
	return false;
#endif
}

/**
 * mips_cm_update_property - update property from the device tree
 *
 * Retrieve the properties from the device tree if a CM node exists and
 * update the internal variables based on them.
 */
#ifdef CONFIG_MIPS_CM
extern void mips_cm_update_property(void);
#else
static inline void mips_cm_update_property(void) {}
#endif

/**
 * mips_cm_has_l2sync - determine whether an L2-only sync region is present
 *
 * Returns true if the system implements an L2-only sync region, else false.
 */
static inline bool mips_cm_has_l2sync(void)
{
#ifdef CONFIG_MIPS_CM
	return mips_cm_l2sync_base != NULL;
#else
	return false;
#endif
}

/* Offsets to register blocks from the CM base address */
#define MIPS_CM_GCB_OFS		0x0000 /* Global Control Block */
#define MIPS_CM_CLCB_OFS	0x2000 /* Core Local Control Block */
#define MIPS_CM_COCB_OFS	0x4000 /* Core Other Control Block */
#define MIPS_CM_GDB_OFS		0x6000 /* Global Debug Block */

/* Total size of the CM memory mapped registers */
#define MIPS_CM_GCR_SIZE	0x8000

/* Size of the L2-only sync region */
#define MIPS_CM_L2SYNC_SIZE	0x1000

#define GCR_ACCESSOR_RO(sz, off, name) \
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_GCB_OFS + off, name) \
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name)

#define GCR_ACCESSOR_RW(sz, off, name) \
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_GCB_OFS + off, name) \
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name)

#define GCR_CX_ACCESSOR_RO(sz, off, name) \
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name) \
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name)

#define GCR_CX_ACCESSOR_RW(sz, off, name) \
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name) \
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name)
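
/*
 * Each GCR_ACCESSOR_*() invocation below expands, via the CPS_ACCESSOR_*()
 * helpers in asm/mips-cps.h, into read_gcr_<name>()/write_gcr_<name>()
 * accessors plus redirect-block variants (redir_, and cl_/co_ prefixes for
 * the core-local/core-other blocks). Purely as an illustration of the
 * resulting call style (the values written are made up):
 */
#if 0
static void example_accessor_usage(void)
{
	u64 cfg = read_gcr_config();	/* GCR_CONFIG, global block */
	u32 rev = read_gcr_rev();	/* GCR_REV */

	write_gcr_cl_coherence(0xff);	/* core-local GCR_Cx_COHERENCE */
	(void)cfg;
	(void)rev;
}
#endif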

/* GCR_CONFIG - Information about the system */
GCR_ACCESSOR_RO(64, 0x000, config)
#define CM_GCR_CONFIG_CLUSTER_COH_CAPABLE	BIT_ULL(43)
#define CM_GCR_CONFIG_CLUSTER_ID	GENMASK_ULL(39, 32)
#define CM_GCR_CONFIG_NUM_CLUSTERS	GENMASK(29, 23)
#define CM_GCR_CONFIG_NUMIOCU	GENMASK(15, 8)
#define CM_GCR_CONFIG_PCORES	GENMASK(7, 0)
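
/*
 * The fields above are extracted with the bitfield helpers from
 * linux/bitfield.h; for example, PCORES encodes the number of cores in the
 * cluster minus one. A sketch mirroring that common pattern, not quoting any
 * specific caller:
 */
#if 0
static unsigned int example_num_cores(void)
{
	/* PCORES holds the core count minus one */
	return FIELD_GET(CM_GCR_CONFIG_PCORES, read_gcr_config()) + 1;
}
#endif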

/* GCR_BASE - Base address of the Global Configuration Registers (GCRs) */
GCR_ACCESSOR_RW(64, 0x008, base)
#define CM_GCR_BASE_GCRBASE	GENMASK_ULL(47, 15)
#define CM_GCR_BASE_CMDEFTGT	GENMASK(1, 0)
#define CM_GCR_BASE_CMDEFTGT_MEM	0
#define CM_GCR_BASE_CMDEFTGT_RESERVED	1
#define CM_GCR_BASE_CMDEFTGT_IOCU0	2
#define CM_GCR_BASE_CMDEFTGT_IOCU1	3

/* GCR_ACCESS - Controls core/IOCU access to GCRs */
GCR_ACCESSOR_RW(32, 0x020, access)
#define CM_GCR_ACCESS_ACCESSEN	GENMASK(7, 0)

/* GCR_REV - Indicates the Coherence Manager revision */
GCR_ACCESSOR_RO(32, 0x030, rev)
#define CM_GCR_REV_MAJOR	GENMASK(15, 8)
#define CM_GCR_REV_MINOR	GENMASK(7, 0)

#define CM_ENCODE_REV(major, minor) \
	(FIELD_PREP(CM_GCR_REV_MAJOR, major) | \
	 FIELD_PREP(CM_GCR_REV_MINOR, minor))

#define CM_REV_CM2	CM_ENCODE_REV(6, 0)
#define CM_REV_CM2_5	CM_ENCODE_REV(7, 0)
#define CM_REV_CM3	CM_ENCODE_REV(8, 0)
#define CM_REV_CM3_5	CM_ENCODE_REV(9, 0)
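
/*
 * Because CM_ENCODE_REV() packs major/minor into the same layout as GCR_REV,
 * the CM_REV_* values can be compared numerically against mips_cm_revision()
 * (declared further down in this header). A brief sketch:
 */
#if 0
static bool example_has_cm3_or_later(void)
{
	/* mips_cm_revision() returns 0 when no CM is present */
	return mips_cm_revision() >= CM_REV_CM3;
}
#endif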

/* GCR_ERR_CONTROL - Control error checking logic */
GCR_ACCESSOR_RW(32, 0x038, err_control)
#define CM_GCR_ERR_CONTROL_L2_ECC_EN	BIT(1)
#define CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT	BIT(0)

/* GCR_ERR_MASK - Control which errors are reported as interrupts */
GCR_ACCESSOR_RW(64, 0x040, error_mask)

/* GCR_ERR_CAUSE - Indicates the type of error that occurred */
GCR_ACCESSOR_RW(64, 0x048, error_cause)
#define CM_GCR_ERROR_CAUSE_ERRTYPE	GENMASK(31, 27)
#define CM3_GCR_ERROR_CAUSE_ERRTYPE	GENMASK_ULL(63, 58)
#define CM_GCR_ERROR_CAUSE_ERRINFO	GENMASK(26, 0)

/* GCR_ERR_ADDR - Indicates the address associated with an error */
GCR_ACCESSOR_RW(64, 0x050, error_addr)

/* GCR_ERR_MULT - Indicates when multiple errors have occurred */
GCR_ACCESSOR_RW(64, 0x058, error_mult)
#define CM_GCR_ERROR_MULT_ERR2ND	GENMASK(4, 0)

/* GCR_L2_ONLY_SYNC_BASE - Base address of the L2 cache-only sync region */
GCR_ACCESSOR_RW(64, 0x070, l2_only_sync_base)
#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE	GENMASK(31, 12)
#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN	BIT(0)

/* GCR_GIC_BASE - Base address of the Global Interrupt Controller (GIC) */
GCR_ACCESSOR_RW(64, 0x080, gic_base)
#define CM_GCR_GIC_BASE_GICBASE	GENMASK(31, 17)
#define CM_GCR_GIC_BASE_GICEN	BIT(0)

/* GCR_CPC_BASE - Base address of the Cluster Power Controller (CPC) */
GCR_ACCESSOR_RW(64, 0x088, cpc_base)
#define CM_GCR_CPC_BASE_CPCBASE	GENMASK(31, 15)
#define CM_GCR_CPC_BASE_CPCEN	BIT(0)

/* GCR_REGn_BASE - Base addresses of CM address regions */
GCR_ACCESSOR_RW(64, 0x090, reg0_base)
GCR_ACCESSOR_RW(64, 0x0a0, reg1_base)
GCR_ACCESSOR_RW(64, 0x0b0, reg2_base)
GCR_ACCESSOR_RW(64, 0x0c0, reg3_base)
#define CM_GCR_REGn_BASE_BASEADDR	GENMASK(31, 16)

/* GCR_REGn_MASK - Size & destination of CM address regions */
GCR_ACCESSOR_RW(64, 0x098, reg0_mask)
GCR_ACCESSOR_RW(64, 0x0a8, reg1_mask)
GCR_ACCESSOR_RW(64, 0x0b8, reg2_mask)
GCR_ACCESSOR_RW(64, 0x0c8, reg3_mask)
#define CM_GCR_REGn_MASK_ADDRMASK	GENMASK(31, 16)
#define CM_GCR_REGn_MASK_CCAOVR	GENMASK(7, 5)
#define CM_GCR_REGn_MASK_CCAOVREN	BIT(4)
#define CM_GCR_REGn_MASK_DROPL2	BIT(2)
#define CM_GCR_REGn_MASK_CMTGT	GENMASK(1, 0)
#define CM_GCR_REGn_MASK_CMTGT_DISABLED	0x0
#define CM_GCR_REGn_MASK_CMTGT_MEM	0x1
#define CM_GCR_REGn_MASK_CMTGT_IOCU0	0x2
#define CM_GCR_REGn_MASK_CMTGT_IOCU1	0x3

/* GCR_GIC_STATUS - Indicates presence of a Global Interrupt Controller (GIC) */
GCR_ACCESSOR_RO(32, 0x0d0, gic_status)
#define CM_GCR_GIC_STATUS_EX	BIT(0)

/* GCR_CPC_STATUS - Indicates presence of a Cluster Power Controller (CPC) */
GCR_ACCESSOR_RO(32, 0x0f0, cpc_status)
#define CM_GCR_CPC_STATUS_EX	BIT(0)

/* GCR_ACCESS - Controls core/IOCU access to GCRs */
GCR_ACCESSOR_RW(32, 0x120, access_cm3)
#define CM_GCR_ACCESS_ACCESSEN	GENMASK(7, 0)

/* GCR_L2_CONFIG - Indicates L2 cache configuration when Config5.L2C=1 */
GCR_ACCESSOR_RW(32, 0x130, l2_config)
#define CM_GCR_L2_CONFIG_BYPASS	BIT(20)
#define CM_GCR_L2_CONFIG_SET_SIZE	GENMASK(15, 12)
#define CM_GCR_L2_CONFIG_LINE_SIZE	GENMASK(11, 8)
#define CM_GCR_L2_CONFIG_ASSOC	GENMASK(7, 0)

/* GCR_SYS_CONFIG2 - Further information about the system */
GCR_ACCESSOR_RO(32, 0x150, sys_config2)
#define CM_GCR_SYS_CONFIG2_MAXVPW	GENMASK(3, 0)

/* GCR_L2-RAM_CONFIG - Configuration & status of L2 cache RAMs */
GCR_ACCESSOR_RW(64, 0x240, l2_ram_config)
#define CM_GCR_L2_RAM_CONFIG_PRESENT	BIT(31)
#define CM_GCR_L2_RAM_CONFIG_HCI_DONE	BIT(30)
#define CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED	BIT(29)

/* GCR_L2_PFT_CONTROL - Controls hardware L2 prefetching */
GCR_ACCESSOR_RW(32, 0x300, l2_pft_control)
#define CM_GCR_L2_PFT_CONTROL_PAGEMASK	GENMASK(31, 12)
#define CM_GCR_L2_PFT_CONTROL_PFTEN	BIT(8)
#define CM_GCR_L2_PFT_CONTROL_NPFT	GENMASK(7, 0)

/* GCR_L2_PFT_CONTROL_B - Controls hardware L2 prefetching */
GCR_ACCESSOR_RW(32, 0x308, l2_pft_control_b)
#define CM_GCR_L2_PFT_CONTROL_B_CEN	BIT(8)
#define CM_GCR_L2_PFT_CONTROL_B_PORTID	GENMASK(7, 0)

/* GCR_L2_TAG_ADDR - Access addresses in L2 cache tags */
GCR_ACCESSOR_RW(64, 0x600, l2_tag_addr)

/* GCR_L2_TAG_STATE - Access L2 cache tag state */
GCR_ACCESSOR_RW(64, 0x608, l2_tag_state)

/* GCR_L2_DATA - Access data in L2 cache lines */
GCR_ACCESSOR_RW(64, 0x610, l2_data)

/* GCR_L2_ECC - Access ECC information from L2 cache lines */
GCR_ACCESSOR_RW(64, 0x618, l2_ecc)

/* GCR_L2SM_COP - L2 cache op state machine control */
GCR_ACCESSOR_RW(32, 0x620, l2sm_cop)
#define CM_GCR_L2SM_COP_PRESENT	BIT(31)
#define CM_GCR_L2SM_COP_RESULT	GENMASK(8, 6)
#define CM_GCR_L2SM_COP_RESULT_DONTCARE	0
#define CM_GCR_L2SM_COP_RESULT_DONE_OK	1
#define CM_GCR_L2SM_COP_RESULT_DONE_ERROR	2
#define CM_GCR_L2SM_COP_RESULT_ABORT_OK	3
#define CM_GCR_L2SM_COP_RESULT_ABORT_ERROR	4
#define CM_GCR_L2SM_COP_RUNNING	BIT(5)
#define CM_GCR_L2SM_COP_TYPE	GENMASK(4, 2)
#define CM_GCR_L2SM_COP_TYPE_IDX_WBINV	0
#define CM_GCR_L2SM_COP_TYPE_IDX_STORETAG	1
#define CM_GCR_L2SM_COP_TYPE_IDX_STORETAGDATA	2
#define CM_GCR_L2SM_COP_TYPE_HIT_INV	4
#define CM_GCR_L2SM_COP_TYPE_HIT_WBINV	5
#define CM_GCR_L2SM_COP_TYPE_HIT_WB	6
#define CM_GCR_L2SM_COP_TYPE_FETCHLOCK	7
#define CM_GCR_L2SM_COP_CMD	GENMASK(1, 0)
#define CM_GCR_L2SM_COP_CMD_START	1 /* only when idle */
#define CM_GCR_L2SM_COP_CMD_ABORT	3 /* only when running */

/* GCR_L2SM_TAG_ADDR_COP - L2 cache op state machine address control */
GCR_ACCESSOR_RW(64, 0x628, l2sm_tag_addr_cop)
#define CM_GCR_L2SM_TAG_ADDR_COP_NUM_LINES	GENMASK_ULL(63, 48)
#define CM_GCR_L2SM_TAG_ADDR_COP_START_TAG	GENMASK_ULL(47, 6)

/* GCR_BEV_BASE - Controls the location of the BEV for powered up cores */
GCR_ACCESSOR_RW(64, 0x680, bev_base)

/* GCR_Cx_RESET_RELEASE - Controls core reset for CM 1.x */
GCR_CX_ACCESSOR_RW(32, 0x000, reset_release)

/* GCR_Cx_COHERENCE - Controls core coherence */
GCR_CX_ACCESSOR_RW(32, 0x008, coherence)
#define CM_GCR_Cx_COHERENCE_COHDOMAINEN	GENMASK(7, 0)
#define CM3_GCR_Cx_COHERENCE_COHEN	BIT(0)

/* GCR_Cx_CONFIG - Information about a core's configuration */
GCR_CX_ACCESSOR_RO(32, 0x010, config)
#define CM_GCR_Cx_CONFIG_IOCUTYPE	GENMASK(11, 10)
#define CM_GCR_Cx_CONFIG_PVPE	GENMASK(9, 0)

/* GCR_Cx_OTHER - Configure the core-other/redirect GCR block */
GCR_CX_ACCESSOR_RW(32, 0x018, other)
#define CM_GCR_Cx_OTHER_CORENUM	GENMASK(31, 16)	/* CM < 3 */
#define CM_GCR_Cx_OTHER_CLUSTER_EN	BIT(31)		/* CM >= 3.5 */
#define CM_GCR_Cx_OTHER_GIC_EN	BIT(30)		/* CM >= 3.5 */
#define CM_GCR_Cx_OTHER_BLOCK	GENMASK(25, 24)	/* CM >= 3.5 */
#define CM_GCR_Cx_OTHER_BLOCK_LOCAL	0
#define CM_GCR_Cx_OTHER_BLOCK_GLOBAL	1
#define CM_GCR_Cx_OTHER_BLOCK_USER	2
#define CM_GCR_Cx_OTHER_BLOCK_GLOBAL_HIGH	3
#define CM_GCR_Cx_OTHER_CLUSTER	GENMASK(21, 16)	/* CM >= 3.5 */
#define CM3_GCR_Cx_OTHER_CORE	GENMASK(13, 8)	/* CM >= 3 */
#define CM_GCR_Cx_OTHER_CORE_CM	32
#define CM3_GCR_Cx_OTHER_VP	GENMASK(2, 0)	/* CM >= 3 */
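
/*
 * The layout of GCR_Cx_OTHER differs between CM revisions, so callers compose
 * the value with FIELD_PREP() against whichever fields apply. A hedged sketch
 * of the CM3-style case only; mips_cm_lock_other() below is the real
 * interface and this is not a substitute for it:
 */
#if 0
static void example_point_redirect_at(unsigned int core, unsigned int vp)
{
	u32 val = FIELD_PREP(CM3_GCR_Cx_OTHER_CORE, core) |
		  FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp);

	write_gcr_cl_other(val);	/* core-local GCR_Cx_OTHER */
}
#endif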

/* GCR_Cx_RESET_BASE - Configure where powered up cores will fetch from */
GCR_CX_ACCESSOR_RW(32, 0x020, reset_base)
GCR_CX_ACCESSOR_RW(64, 0x020, reset64_base)
#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE	GENMASK(31, 12)
#define CM_GCR_Cx_RESET64_BASE_BEVEXCBASE	GENMASK_ULL(47, 12)
#define CM_GCR_Cx_RESET_BASE_MODE	BIT(1)

/* GCR_Cx_ID - Identify the current core */
GCR_CX_ACCESSOR_RO(32, 0x028, id)
#define CM_GCR_Cx_ID_CLUSTER	GENMASK(15, 8)
#define CM_GCR_Cx_ID_CORE	GENMASK(7, 0)

/* GCR_Cx_RESET_EXT_BASE - Configure behaviour when cores reset or power up */
GCR_CX_ACCESSOR_RW(32, 0x030, reset_ext_base)
#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET	BIT(31)
#define CM_GCR_Cx_RESET_EXT_BASE_UEB	BIT(30)
#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK	GENMASK(27, 20)
#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA	GENMASK(7, 1)
#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT	BIT(0)

/**
 * mips_cm_l2sync - perform an L2-only sync operation
 *
 * If an L2-only sync region is present in the system then this function
 * performs an L2-only sync and returns zero. Otherwise it returns -ENODEV.
 */
static inline int mips_cm_l2sync(void)
{
	if (!mips_cm_has_l2sync())
		return -ENODEV;

	writel(0, mips_cm_l2sync_base);
	return 0;
}

/**
 * mips_cm_revision() - return CM revision
 *
 * Return: The revision of the CM, from GCR_REV, or 0 if no CM is present. The
 * return value should be checked against the CM_REV_* macros.
 */
static inline int mips_cm_revision(void)
{
	if (!mips_cm_present())
		return 0;

	return read_gcr_rev();
}

/**
 * mips_cm_max_vp_width() - return the width in bits of VP indices
 *
 * Return: the width, in bits, of VP indices in fields that combine core & VP
 * indices.
 */
static inline unsigned int mips_cm_max_vp_width(void)
{
	extern int smp_num_siblings;

	if (mips_cm_revision() >= CM_REV_CM3)
		return FIELD_GET(CM_GCR_SYS_CONFIG2_MAXVPW,
				 read_gcr_sys_config2());

	if (mips_cm_present()) {
		/*
		 * We presume that all cores in the system will have the same
		 * number of VP(E)s, and if that ever changes then this will
		 * need revisiting.
		 */
		return FIELD_GET(CM_GCR_Cx_CONFIG_PVPE, read_gcr_cl_config()) + 1;
	}

	if (IS_ENABLED(CONFIG_SMP))
		return smp_num_siblings;

	return 1;
}

/**
 * mips_cm_vp_id() - calculate the hardware VP ID for a CPU
 * @cpu: the CPU whose VP ID to calculate
 *
 * Hardware such as the GIC uses identifiers for VPs which may not match the
 * CPU numbers used by Linux. This function calculates the hardware VP
 * identifier corresponding to a given CPU.
 *
 * Return: the VP ID for the CPU.
 */
static inline unsigned int mips_cm_vp_id(unsigned int cpu)
{
	unsigned int core = cpu_core(&cpu_data[cpu]);
	unsigned int vp = cpu_vpe_id(&cpu_data[cpu]);

	return (core * mips_cm_max_vp_width()) + vp;
}
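
/*
 * A worked example of the calculation above (numbers are illustrative only):
 * on a system where mips_cm_max_vp_width() is 4, the CPU running as VP 1 of
 * core 2 gets hardware VP ID (2 * 4) + 1 = 9.
 */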

#ifdef CONFIG_MIPS_CM

/**
 * mips_cm_lock_other - lock access to redirect/other region
 * @cluster: the other cluster to be accessed
 * @core: the other core to be accessed
 * @vp: the VP within the other core to be accessed
 * @block: the register block to be accessed
 *
 * Configure the redirect/other region for the local core/VP (depending upon
 * the CM revision) to target the specified @cluster, @core, @vp & register
 * @block. Must be called before using the redirect/other region, and followed
 * by a call to mips_cm_unlock_other() when access to the redirect/other region
 * is complete.
 *
 * This function acquires a spinlock such that code between it &
 * mips_cm_unlock_other() calls cannot be pre-empted by anything which may
 * reconfigure the redirect/other region, and cannot be interfered with by
 * another VP in the core. As such, calls to this function should not be nested.
 */
extern void mips_cm_lock_other(unsigned int cluster, unsigned int core,
			       unsigned int vp, unsigned int block);

/**
 * mips_cm_unlock_other - unlock access to redirect/other region
 *
 * Must be called after mips_cm_lock_other() once all required access to the
 * redirect/other region has been completed.
 */
extern void mips_cm_unlock_other(void);

#else /* !CONFIG_MIPS_CM */

static inline void mips_cm_lock_other(unsigned int cluster, unsigned int core,
				      unsigned int vp, unsigned int block) { }
static inline void mips_cm_unlock_other(void) { }

#endif /* !CONFIG_MIPS_CM */

/**
 * mips_cm_lock_other_cpu - lock access to redirect/other region
 * @cpu: the other CPU whose register we want to access
 * @block: the register block to be accessed
 *
 * Configure the redirect/other region for the local core/VP (depending upon
 * the CM revision) to target the specified @cpu & register @block. This is
 * equivalent to calling mips_cm_lock_other() but accepts a Linux CPU number
 * for convenience.
 */
static inline void mips_cm_lock_other_cpu(unsigned int cpu, unsigned int block)
{
	struct cpuinfo_mips *d = &cpu_data[cpu];

	mips_cm_lock_other(cpu_cluster(d), cpu_core(d), cpu_vpe_id(d), block);
}
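
/*
 * Typical use of the redirect/other region pairs a lock/unlock with one of
 * the read_gcr_co_*()/read_gcr_redir_*() accessors generated above. A hedged
 * sketch; the particular register read is only an example:
 */
#if 0
static unsigned int example_other_cpu_pvpe(unsigned int cpu)
{
	unsigned int pvpe;

	mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
	pvpe = FIELD_GET(CM_GCR_Cx_CONFIG_PVPE, read_gcr_co_config());
	mips_cm_unlock_other();

	return pvpe;
}
#endif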

#endif /* __MIPS_ASM_MIPS_CM_H__ */