1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */
3 #include <linux/security.h>
4 #include <linux/debugfs.h>
5 #include <linux/ktime.h>
6 #include <linux/mutex.h>
7 #include <linux/unaligned.h>
8 #include <cxlpci.h>
9 #include <cxlmem.h>
10 #include <cxl.h>
11
12 #include "core.h"
13 #include "trace.h"
14 #include "mce.h"
15
16 static bool cxl_raw_allow_all;
17
18 /**
19 * DOC: cxl mbox
20 *
21 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
22 * implementation is used by the cxl_pci driver to initialize the device
23 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
24 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
25 */
26
27 #define cxl_for_each_cmd(cmd) \
28 for ((cmd) = &cxl_mem_commands[0]; \
29 ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)
30
31 #define CXL_CMD(_id, sin, sout, _flags) \
32 [CXL_MEM_COMMAND_ID_##_id] = { \
33 .info = { \
34 .id = CXL_MEM_COMMAND_ID_##_id, \
35 .size_in = sin, \
36 .size_out = sout, \
37 }, \
38 .opcode = CXL_MBOX_OP_##_id, \
39 .flags = _flags, \
40 }
41
42 #define CXL_VARIABLE_PAYLOAD ~0U
43 /*
44 * This table defines the supported mailbox commands for the driver. This table
45 * is made up of a UAPI structure. Non-negative values as parameters in the
46 * table will be validated against the user's input. For example, if size_in is
47 * 0, and the user passed in 1, it is an error.
48 */
49 static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
50 CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
51 #ifdef CONFIG_CXL_MEM_RAW_COMMANDS
52 CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
53 #endif
54 CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
55 CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
56 CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
57 CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
58 CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
59 CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
60 CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0),
61 CXL_CMD(CLEAR_LOG, 0x10, 0, 0),
62 CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0),
63 CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
64 CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
65 CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
66 CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
67 CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
68 CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
69 CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
70 CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
71 };
72
73 /*
74 * Commands that RAW doesn't permit. The rationale for each:
75 *
76 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
77 * coordination of transaction timeout values at the root bridge level.
78 *
79 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
80 * and needs to be coordinated with HDM updates.
81 *
82 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
83  * driver and any writes from userspace invalidate those contents.
84 *
85 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
86  * to the device after it is marked clean; userspace cannot make that
87 * assertion.
88 *
89 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
90 * is kept up to date with patrol notifications and error management.
91 *
92 * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel
93 * driver orchestration for safety.
94 */
95 static u16 cxl_disabled_raw_commands[] = {
96 CXL_MBOX_OP_ACTIVATE_FW,
97 CXL_MBOX_OP_SET_PARTITION_INFO,
98 CXL_MBOX_OP_SET_LSA,
99 CXL_MBOX_OP_SET_SHUTDOWN_STATE,
100 CXL_MBOX_OP_SCAN_MEDIA,
101 CXL_MBOX_OP_GET_SCAN_MEDIA,
102 CXL_MBOX_OP_GET_POISON,
103 CXL_MBOX_OP_INJECT_POISON,
104 CXL_MBOX_OP_CLEAR_POISON,
105 };
106
107 /*
108 * Command sets that RAW doesn't permit. All opcodes in this set are
109 * disabled because they pass plain text security payloads over the
110 * user/kernel boundary. This functionality is intended to be wrapped
111  * behind the keys ABI, which allows for encrypted payloads in the UAPI.
112 */
113 static u8 security_command_sets[] = {
114 0x44, /* Sanitize */
115 0x45, /* Persistent Memory Data-at-rest Security */
116 0x46, /* Security Passthrough */
117 };
118
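/*
 * A security command is any opcode whose command set (the opcode high byte)
 * appears in security_command_sets[] above.
 */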
119 static bool cxl_is_security_command(u16 opcode)
120 {
121 int i;
122
123 for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
124 if (security_command_sets[i] == (opcode >> 8))
125 return true;
126 return false;
127 }
128
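/*
 * Record a security opcode advertised in the Command Effects Log in the
 * per-device security enabled_cmds bitmap.
 */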
129 static void cxl_set_security_cmd_enabled(struct cxl_security_state *security,
130 u16 opcode)
131 {
132 switch (opcode) {
133 case CXL_MBOX_OP_SANITIZE:
134 set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds);
135 break;
136 case CXL_MBOX_OP_SECURE_ERASE:
137 set_bit(CXL_SEC_ENABLED_SECURE_ERASE,
138 security->enabled_cmds);
139 break;
140 case CXL_MBOX_OP_GET_SECURITY_STATE:
141 set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE,
142 security->enabled_cmds);
143 break;
144 case CXL_MBOX_OP_SET_PASSPHRASE:
145 set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE,
146 security->enabled_cmds);
147 break;
148 case CXL_MBOX_OP_DISABLE_PASSPHRASE:
149 set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
150 security->enabled_cmds);
151 break;
152 case CXL_MBOX_OP_UNLOCK:
153 set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds);
154 break;
155 case CXL_MBOX_OP_FREEZE_SECURITY:
156 set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY,
157 security->enabled_cmds);
158 break;
159 case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
160 set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
161 security->enabled_cmds);
162 break;
163 default:
164 break;
165 }
166 }
167
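/* All poison management opcodes belong to command set 0x43 */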
168 static bool cxl_is_poison_command(u16 opcode)
169 {
170 #define CXL_MBOX_OP_POISON_CMDS 0x43
171
172 if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS)
173 return true;
174
175 return false;
176 }
177
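/*
 * Record a poison opcode advertised in the Command Effects Log in the
 * per-device poison enabled_cmds bitmap.
 */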
178 static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison,
179 u16 opcode)
180 {
181 switch (opcode) {
182 case CXL_MBOX_OP_GET_POISON:
183 set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds);
184 break;
185 case CXL_MBOX_OP_INJECT_POISON:
186 set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds);
187 break;
188 case CXL_MBOX_OP_CLEAR_POISON:
189 set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds);
190 break;
191 case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS:
192 set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds);
193 break;
194 case CXL_MBOX_OP_SCAN_MEDIA:
195 set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds);
196 break;
197 case CXL_MBOX_OP_GET_SCAN_MEDIA:
198 set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds);
199 break;
200 default:
201 break;
202 }
203 }
204
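/* Find the driver's command descriptor for @opcode, or NULL if not supported */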
205 static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
206 {
207 struct cxl_mem_command *c;
208
209 cxl_for_each_cmd(c)
210 if (c->opcode == opcode)
211 return c;
212
213 return NULL;
214 }
215
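/* Resolve @opcode to its UAPI command name for logging, or NULL if unknown */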
216 static const char *cxl_mem_opcode_to_name(u16 opcode)
217 {
218 struct cxl_mem_command *c;
219
220 c = cxl_mem_find_command(opcode);
221 if (!c)
222 return NULL;
223
224 return cxl_command_names[c->info.id].name;
225 }
226
227 /**
228 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
229 * @cxl_mbox: CXL mailbox context
230 * @mbox_cmd: initialized command to execute
231 *
232 * Context: Any context.
233 * Return:
234 * * %>=0 - Number of bytes returned in @out.
235 * * %-E2BIG - Payload is too large for hardware.
236 * * %-EBUSY - Couldn't acquire exclusive mailbox access.
237 * * %-EFAULT - Hardware error occurred.
238 * * %-ENXIO - Command completed, but device reported an error.
239 * * %-EIO - Unexpected output size.
240 *
241  * Mailbox commands may execute successfully yet the device itself may report an
242 * error. While this distinction can be useful for commands from userspace, the
243 * kernel will only be able to use results when both are successful.
244 */
245 int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
246 struct cxl_mbox_cmd *mbox_cmd)
247 {
248 size_t out_size, min_out;
249 int rc;
250
251 if (mbox_cmd->size_in > cxl_mbox->payload_size ||
252 mbox_cmd->size_out > cxl_mbox->payload_size)
253 return -E2BIG;
254
255 out_size = mbox_cmd->size_out;
256 min_out = mbox_cmd->min_out;
257 rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
258 /*
259 * EIO is reserved for a payload size mismatch and mbox_send()
260 * may not return this error.
261 */
262 if (WARN_ONCE(rc == -EIO, "Bad return code: -EIO"))
263 return -ENXIO;
264 if (rc)
265 return rc;
266
267 if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS &&
268 mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND)
269 return cxl_mbox_cmd_rc2errno(mbox_cmd);
270
271 if (!out_size)
272 return 0;
273
274 /*
275 * Variable sized output needs to at least satisfy the caller's
276 * minimum if not the fully requested size.
277 */
278 if (min_out == 0)
279 min_out = out_size;
280
281 if (mbox_cmd->size_out < min_out)
282 return -EIO;
283 return 0;
284 }
285 EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, "CXL");
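/*
 * Example of a typical internal caller of cxl_internal_send_cmd() (an
 * illustrative sketch modeled on cxl_get_dirty_count() below, not a
 * complete function):
 *
 *	struct cxl_mbox_get_health_info_out hi;
 *	struct cxl_mbox_cmd mbox_cmd = {
 *		.opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
 *		.size_out = sizeof(hi),
 *		.payload_out = &hi,
 *	};
 *	int rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 */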
286
287 static bool cxl_mem_raw_command_allowed(u16 opcode)
288 {
289 int i;
290
291 if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
292 return false;
293
294 if (security_locked_down(LOCKDOWN_PCI_ACCESS))
295 return false;
296
297 if (cxl_raw_allow_all)
298 return true;
299
300 if (cxl_is_security_command(opcode))
301 return false;
302
303 for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
304 if (cxl_disabled_raw_commands[i] == opcode)
305 return false;
306
307 return true;
308 }
309
310 /**
311 * cxl_payload_from_user_allowed() - Check contents of in_payload.
312 * @opcode: The mailbox command opcode.
313 * @payload_in: Pointer to the input payload passed in from user space.
314 *
315 * Return:
316 * * true - payload_in passes check for @opcode.
317 * * false - payload_in contains invalid or unsupported values.
318 *
319 * The driver may inspect payload contents before sending a mailbox
320 * command from user space to the device. The intent is to reject
321 * commands with input payloads that are known to be unsafe. This
322  * check is not intended to replace the user's careful selection of
323 * mailbox command parameters and makes no guarantee that the user
324 * command will succeed, nor that it is appropriate.
325 *
326 * The specific checks are determined by the opcode.
327 */
328 static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
329 {
330 switch (opcode) {
331 case CXL_MBOX_OP_SET_PARTITION_INFO: {
332 struct cxl_mbox_set_partition_info *pi = payload_in;
333
334 if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
335 return false;
336 break;
337 }
338 case CXL_MBOX_OP_CLEAR_LOG: {
339 const uuid_t *uuid = (uuid_t *)payload_in;
340
341 /*
342 		 * Restrict the 'Clear log' action to only apply to
343 * Vendor debug logs.
344 */
345 return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID);
346 }
347 default:
348 break;
349 }
350 return true;
351 }
352
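/*
 * Build a cxl_mbox_cmd from a validated userspace request: copy in the input
 * payload, screen it with cxl_payload_from_user_allowed(), and allocate a
 * bounce buffer for the output payload.
 */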
353 static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox_cmd,
354 struct cxl_mailbox *cxl_mbox, u16 opcode,
355 size_t in_size, size_t out_size, u64 in_payload)
356 {
357 *mbox_cmd = (struct cxl_mbox_cmd) {
358 .opcode = opcode,
359 .size_in = in_size,
360 };
361
362 if (in_size) {
363 mbox_cmd->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
364 in_size);
365 if (IS_ERR(mbox_cmd->payload_in))
366 return PTR_ERR(mbox_cmd->payload_in);
367
368 if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in)) {
369 dev_dbg(cxl_mbox->host, "%s: input payload not allowed\n",
370 cxl_mem_opcode_to_name(opcode));
371 kvfree(mbox_cmd->payload_in);
372 return -EBUSY;
373 }
374 }
375
376 /* Prepare to handle a full payload for variable sized output */
377 if (out_size == CXL_VARIABLE_PAYLOAD)
378 mbox_cmd->size_out = cxl_mbox->payload_size;
379 else
380 mbox_cmd->size_out = out_size;
381
382 if (mbox_cmd->size_out) {
383 mbox_cmd->payload_out = kvzalloc(mbox_cmd->size_out, GFP_KERNEL);
384 if (!mbox_cmd->payload_out) {
385 kvfree(mbox_cmd->payload_in);
386 return -ENOMEM;
387 }
388 }
389 return 0;
390 }
391
392 static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
393 {
394 kvfree(mbox->payload_in);
395 kvfree(mbox->payload_out);
396 }
397
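/* Sanitize a RAW passthrough request from userspace into a cxl_mem_command */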
398 static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
399 const struct cxl_send_command *send_cmd,
400 struct cxl_mailbox *cxl_mbox)
401 {
402 if (send_cmd->raw.rsvd)
403 return -EINVAL;
404
405 /*
406 * Unlike supported commands, the output size of RAW commands
407 * gets passed along without further checking, so it must be
408 * validated here.
409 */
410 if (send_cmd->out.size > cxl_mbox->payload_size)
411 return -EINVAL;
412
413 if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
414 return -EPERM;
415
416 dev_WARN_ONCE(cxl_mbox->host, true, "raw command path used\n");
417
418 *mem_cmd = (struct cxl_mem_command) {
419 .info = {
420 .id = CXL_MEM_COMMAND_ID_RAW,
421 .size_in = send_cmd->in.size,
422 .size_out = send_cmd->out.size,
423 },
424 .opcode = send_cmd->raw.opcode
425 };
426
427 return 0;
428 }
429
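/*
 * Validate a known UAPI command against the cxl_mem_commands table and the
 * device's enabled/exclusive command bitmaps.
 */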
430 static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
431 const struct cxl_send_command *send_cmd,
432 struct cxl_mailbox *cxl_mbox)
433 {
434 struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
435 const struct cxl_command_info *info = &c->info;
436
437 if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
438 return -EINVAL;
439
440 if (send_cmd->rsvd)
441 return -EINVAL;
442
443 if (send_cmd->in.rsvd || send_cmd->out.rsvd)
444 return -EINVAL;
445
446 /* Check that the command is enabled for hardware */
447 if (!test_bit(info->id, cxl_mbox->enabled_cmds))
448 return -ENOTTY;
449
450 /* Check that the command is not claimed for exclusive kernel use */
451 if (test_bit(info->id, cxl_mbox->exclusive_cmds))
452 return -EBUSY;
453
454 /* Check the input buffer is the expected size */
455 if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
456 (info->size_in != send_cmd->in.size))
457 return -ENOMEM;
458
459 /* Check the output buffer is at least large enough */
460 if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
461 (send_cmd->out.size < info->size_out))
462 return -ENOMEM;
463
464 *mem_cmd = (struct cxl_mem_command) {
465 .info = {
466 .id = info->id,
467 .flags = info->flags,
468 .size_in = send_cmd->in.size,
469 .size_out = send_cmd->out.size,
470 },
471 .opcode = c->opcode
472 };
473
474 return 0;
475 }
476
477 /**
478 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
479 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
480 * @cxl_mbox: CXL mailbox context
481 * @send_cmd: &struct cxl_send_command copied in from userspace.
482 *
483 * Return:
484 * * %0 - @out_cmd is ready to send.
485 * * %-ENOTTY - Invalid command specified.
486 * * %-EINVAL - Reserved fields or invalid values were used.
487 * * %-ENOMEM - Input or output buffer wasn't sized properly.
488 * * %-EPERM - Attempted to use a protected command.
489 * * %-EBUSY - Kernel has claimed exclusive access to this opcode
490 *
491 * The result of this command is a fully validated command in @mbox_cmd that is
492 * safe to send to the hardware.
493 */
494 static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
495 struct cxl_mailbox *cxl_mbox,
496 const struct cxl_send_command *send_cmd)
497 {
498 struct cxl_mem_command mem_cmd;
499 int rc;
500
501 if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
502 return -ENOTTY;
503
504 /*
505 * The user can never specify an input payload larger than what hardware
506 * supports, but output can be arbitrarily large (simply write out as
507 * much data as the hardware provides).
508 */
509 if (send_cmd->in.size > cxl_mbox->payload_size)
510 return -EINVAL;
511
512 /* Sanitize and construct a cxl_mem_command */
513 if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
514 rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxl_mbox);
515 else
516 rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxl_mbox);
517
518 if (rc)
519 return rc;
520
521 /* Sanitize and construct a cxl_mbox_cmd */
522 return cxl_mbox_cmd_ctor(mbox_cmd, cxl_mbox, mem_cmd.opcode,
523 mem_cmd.info.size_in, mem_cmd.info.size_out,
524 send_cmd->in.payload);
525 }
526
527 int cxl_query_cmd(struct cxl_mailbox *cxl_mbox,
528 struct cxl_mem_query_commands __user *q)
529 {
530 struct device *dev = cxl_mbox->host;
531 struct cxl_mem_command *cmd;
532 u32 n_commands;
533 int j = 0;
534
535 dev_dbg(dev, "Query IOCTL\n");
536
537 if (get_user(n_commands, &q->n_commands))
538 return -EFAULT;
539
540 /* returns the total number if 0 elements are requested. */
541 if (n_commands == 0)
542 return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);
543
544 /*
545 * otherwise, return min(n_commands, total commands) cxl_command_info
546 * structures.
547 */
548 cxl_for_each_cmd(cmd) {
549 struct cxl_command_info info = cmd->info;
550
551 if (test_bit(info.id, cxl_mbox->enabled_cmds))
552 info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
553 if (test_bit(info.id, cxl_mbox->exclusive_cmds))
554 info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;
555
556 if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
557 return -EFAULT;
558
559 if (j == n_commands)
560 break;
561 }
562
563 return 0;
564 }
565
566 /**
567 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
568 * @cxl_mbox: The mailbox context for the operation.
569 * @mbox_cmd: The validated mailbox command.
570 * @out_payload: Pointer to userspace's output payload.
571 * @size_out: (Input) Max payload size to copy out.
572 * (Output) Payload size hardware generated.
573 * @retval: Hardware generated return code from the operation.
574 *
575 * Return:
576 * * %0 - Mailbox transaction succeeded. This implies the mailbox
577  *   protocol completed successfully, not that the operation itself
578 * was successful.
579 * * %-ENOMEM - Couldn't allocate a bounce buffer.
580 * * %-EFAULT - Something happened with copy_to/from_user.
581 * * %-EINTR - Mailbox acquisition interrupted.
582 * * %-EXXX - Transaction level failures.
583 *
584 * Dispatches a mailbox command on behalf of a userspace request.
585 * The output payload is copied to userspace.
586 *
587 * See cxl_send_cmd().
588 */
589 static int handle_mailbox_cmd_from_user(struct cxl_mailbox *cxl_mbox,
590 struct cxl_mbox_cmd *mbox_cmd,
591 u64 out_payload, s32 *size_out,
592 u32 *retval)
593 {
594 struct device *dev = cxl_mbox->host;
595 int rc;
596
597 dev_dbg(dev,
598 "Submitting %s command for user\n"
599 "\topcode: %x\n"
600 "\tsize: %zx\n",
601 cxl_mem_opcode_to_name(mbox_cmd->opcode),
602 mbox_cmd->opcode, mbox_cmd->size_in);
603
604 rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
605 if (rc)
606 goto out;
607
608 /*
609 * @size_out contains the max size that's allowed to be written back out
610 * to userspace. While the payload may have written more output than
611 	 * this, it will have to be ignored.
612 */
613 if (mbox_cmd->size_out) {
614 dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
615 "Invalid return size\n");
616 if (copy_to_user(u64_to_user_ptr(out_payload),
617 mbox_cmd->payload_out, mbox_cmd->size_out)) {
618 rc = -EFAULT;
619 goto out;
620 }
621 }
622
623 *size_out = mbox_cmd->size_out;
624 *retval = mbox_cmd->return_code;
625
626 out:
627 cxl_mbox_cmd_dtor(mbox_cmd);
628 return rc;
629 }
630
631 int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s)
632 {
633 struct device *dev = cxl_mbox->host;
634 struct cxl_send_command send;
635 struct cxl_mbox_cmd mbox_cmd;
636 int rc;
637
638 dev_dbg(dev, "Send IOCTL\n");
639
640 if (copy_from_user(&send, s, sizeof(send)))
641 return -EFAULT;
642
643 rc = cxl_validate_cmd_from_user(&mbox_cmd, cxl_mbox, &send);
644 if (rc)
645 return rc;
646
647 rc = handle_mailbox_cmd_from_user(cxl_mbox, &mbox_cmd, send.out.payload,
648 &send.out.size, &send.retval);
649 if (rc)
650 return rc;
651
652 if (copy_to_user(s, &send, sizeof(send)))
653 return -EFAULT;
654
655 return 0;
656 }
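/*
 * Userspace sketch of the CXL_MEM_SEND_COMMAND path handled above
 * (illustrative only; assumes the cxl_mem.h UAPI and a /dev/cxl/memN
 * character device):
 *
 *	struct cxl_send_command send = {
 *		.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *		.out.size = sizeof(buf),
 *		.out.payload = (__u64)(uintptr_t)buf,
 *	};
 *	rc = ioctl(fd, CXL_MEM_SEND_COMMAND, &send);
 *
 * On success, send.retval carries the device return code and send.out.size
 * the number of bytes written back to buf.
 */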
657
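/* Read an entire device log by issuing Get Log in payload_size-bounded chunks */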
658 static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
659 u32 *size, u8 *out)
660 {
661 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
662 u32 remaining = *size;
663 u32 offset = 0;
664
665 while (remaining) {
666 u32 xfer_size = min_t(u32, remaining, cxl_mbox->payload_size);
667 struct cxl_mbox_cmd mbox_cmd;
668 struct cxl_mbox_get_log log;
669 int rc;
670
671 log = (struct cxl_mbox_get_log) {
672 .uuid = *uuid,
673 .offset = cpu_to_le32(offset),
674 .length = cpu_to_le32(xfer_size),
675 };
676
677 mbox_cmd = (struct cxl_mbox_cmd) {
678 .opcode = CXL_MBOX_OP_GET_LOG,
679 .size_in = sizeof(log),
680 .payload_in = &log,
681 .size_out = xfer_size,
682 .payload_out = out,
683 };
684
685 rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
686
687 /*
688 * The output payload length that indicates the number
689 * of valid bytes can be smaller than the Log buffer
690 * size.
691 */
692 if (rc == -EIO && mbox_cmd.size_out < xfer_size) {
693 offset += mbox_cmd.size_out;
694 break;
695 }
696
697 if (rc < 0)
698 return rc;
699
700 out += xfer_size;
701 remaining -= xfer_size;
702 offset += xfer_size;
703 }
704
705 *size = offset;
706
707 return 0;
708 }
709
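/* Tally the Features opcodes found in the CEL to size up the Features capability */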
710 static int check_features_opcodes(u16 opcode, int *ro_cmds, int *wr_cmds)
711 {
712 switch (opcode) {
713 case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
714 case CXL_MBOX_OP_GET_FEATURE:
715 (*ro_cmds)++;
716 return 1;
717 case CXL_MBOX_OP_SET_FEATURE:
718 (*wr_cmds)++;
719 return 1;
720 default:
721 return 0;
722 }
723 }
724
725 /* 'Get Supported Features' and 'Get Feature' */
726 #define MAX_FEATURES_READ_CMDS 2
727 static void set_features_cap(struct cxl_mailbox *cxl_mbox,
728 int ro_cmds, int wr_cmds)
729 {
730 /* Setting up Features capability while walking the CEL */
731 if (ro_cmds == MAX_FEATURES_READ_CMDS) {
732 if (wr_cmds)
733 cxl_mbox->feat_cap = CXL_FEATURES_RW;
734 else
735 cxl_mbox->feat_cap = CXL_FEATURES_RO;
736 }
737 }
738
739 /**
740 * cxl_walk_cel() - Walk through the Command Effects Log.
741 * @mds: The driver data for the operation
742 * @size: Length of the Command Effects Log.
743 * @cel: CEL
744 *
745 * Iterate over each entry in the CEL and determine if the driver supports the
746 * command. If so, the command is enabled for the device and can be used later.
747 */
748 static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
749 {
750 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
751 struct cxl_cel_entry *cel_entry;
752 const int cel_entries = size / sizeof(*cel_entry);
753 struct device *dev = mds->cxlds.dev;
754 int i, ro_cmds = 0, wr_cmds = 0;
755
756 cel_entry = (struct cxl_cel_entry *) cel;
757
758 for (i = 0; i < cel_entries; i++) {
759 u16 opcode = le16_to_cpu(cel_entry[i].opcode);
760 struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
761 int enabled = 0;
762
763 if (cmd) {
764 set_bit(cmd->info.id, cxl_mbox->enabled_cmds);
765 enabled++;
766 }
767
768 enabled += check_features_opcodes(opcode, &ro_cmds,
769 &wr_cmds);
770
771 if (cxl_is_poison_command(opcode)) {
772 cxl_set_poison_cmd_enabled(&mds->poison, opcode);
773 enabled++;
774 }
775
776 if (cxl_is_security_command(opcode)) {
777 cxl_set_security_cmd_enabled(&mds->security, opcode);
778 enabled++;
779 }
780
781 dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
782 enabled ? "enabled" : "unsupported by driver");
783 }
784
785 set_features_cap(cxl_mbox, ro_cmds, wr_cmds);
786 }
787
788 static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
789 {
790 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
791 struct cxl_mbox_get_supported_logs *ret;
792 struct cxl_mbox_cmd mbox_cmd;
793 int rc;
794
795 ret = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
796 if (!ret)
797 return ERR_PTR(-ENOMEM);
798
799 mbox_cmd = (struct cxl_mbox_cmd) {
800 .opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
801 .size_out = cxl_mbox->payload_size,
802 .payload_out = ret,
803 /* At least the record number field must be valid */
804 .min_out = 2,
805 };
806 rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
807 if (rc < 0) {
808 kvfree(ret);
809 return ERR_PTR(rc);
810 }
811
812
813 return ret;
814 }
815
816 enum {
817 CEL_UUID,
818 VENDOR_DEBUG_UUID,
819 };
820
821 /* See CXL 2.0 Table 170. Get Log Input Payload */
822 static const uuid_t log_uuid[] = {
823 [CEL_UUID] = DEFINE_CXL_CEL_UUID,
824 [VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
825 };
826
827 /**
828 * cxl_enumerate_cmds() - Enumerate commands for a device.
829 * @mds: The driver data for the operation
830 *
831 * Returns 0 if enumerate completed successfully.
832 *
833 * CXL devices have optional support for certain commands. This function will
834 * determine the set of supported commands for the hardware and update the
835 * enabled_cmds bitmap in the @mds.
836 */
837 int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
838 {
839 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
840 struct cxl_mbox_get_supported_logs *gsl;
841 struct device *dev = mds->cxlds.dev;
842 struct cxl_mem_command *cmd;
843 int i, rc;
844
845 gsl = cxl_get_gsl(mds);
846 if (IS_ERR(gsl))
847 return PTR_ERR(gsl);
848
849 rc = -ENOENT;
850 for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
851 u32 size = le32_to_cpu(gsl->entry[i].size);
852 uuid_t uuid = gsl->entry[i].uuid;
853 u8 *log;
854
855 dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);
856
857 if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
858 continue;
859
860 log = kvmalloc(size, GFP_KERNEL);
861 if (!log) {
862 rc = -ENOMEM;
863 goto out;
864 }
865
866 rc = cxl_xfer_log(mds, &uuid, &size, log);
867 if (rc) {
868 kvfree(log);
869 goto out;
870 }
871
872 cxl_walk_cel(mds, size, log);
873 kvfree(log);
874
875 /* In case CEL was bogus, enable some default commands. */
876 cxl_for_each_cmd(cmd)
877 if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
878 set_bit(cmd->info.id, cxl_mbox->enabled_cmds);
879
880 /* Found the required CEL */
881 rc = 0;
882 }
883 out:
884 kvfree(gsl);
885 return rc;
886 }
887 EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, "CXL");
888
889 void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
890 enum cxl_event_log_type type,
891 enum cxl_event_type event_type,
892 const uuid_t *uuid, union cxl_event *evt)
893 {
894 if (event_type == CXL_CPER_EVENT_MEM_MODULE) {
895 trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
896 return;
897 }
898 if (event_type == CXL_CPER_EVENT_GENERIC) {
899 trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
900 return;
901 }
902
903 if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
904 u64 dpa, hpa = ULLONG_MAX, hpa_alias = ULLONG_MAX;
905 struct cxl_region *cxlr;
906
907 /*
908 * These trace points are annotated with HPA and region
909 * translations. Take topology mutation locks and lookup
910 * { HPA, REGION } from { DPA, MEMDEV } in the event record.
911 */
912 guard(rwsem_read)(&cxl_region_rwsem);
913 guard(rwsem_read)(&cxl_dpa_rwsem);
914
915 dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
916 cxlr = cxl_dpa_to_region(cxlmd, dpa);
917 if (cxlr) {
918 u64 cache_size = cxlr->params.cache_size;
919
920 hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
921 if (cache_size)
922 hpa_alias = hpa - cache_size;
923 }
924
925 if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
926 trace_cxl_general_media(cxlmd, type, cxlr, hpa,
927 hpa_alias, &evt->gen_media);
928 else if (event_type == CXL_CPER_EVENT_DRAM)
929 trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
930 &evt->dram);
931 }
932 }
933 EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");
934
935 static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
936 enum cxl_event_log_type type,
937 struct cxl_event_record_raw *record)
938 {
939 enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
940 const uuid_t *uuid = &record->id;
941
942 if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
943 ev_type = CXL_CPER_EVENT_GEN_MEDIA;
944 else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
945 ev_type = CXL_CPER_EVENT_DRAM;
946 else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
947 ev_type = CXL_CPER_EVENT_MEM_MODULE;
948
949 cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
950 }
951
952 static int cxl_clear_event_record(struct cxl_memdev_state *mds,
953 enum cxl_event_log_type log,
954 struct cxl_get_event_payload *get_pl)
955 {
956 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
957 struct cxl_mbox_clear_event_payload *payload;
958 u16 total = le16_to_cpu(get_pl->record_count);
959 u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
960 size_t pl_size = struct_size(payload, handles, max_handles);
961 struct cxl_mbox_cmd mbox_cmd;
962 u16 cnt;
963 int rc = 0;
964 int i;
965
966 /* Payload size may limit the max handles */
967 if (pl_size > cxl_mbox->payload_size) {
968 max_handles = (cxl_mbox->payload_size - sizeof(*payload)) /
969 sizeof(__le16);
970 pl_size = struct_size(payload, handles, max_handles);
971 }
972
973 payload = kvzalloc(pl_size, GFP_KERNEL);
974 if (!payload)
975 return -ENOMEM;
976
977 *payload = (struct cxl_mbox_clear_event_payload) {
978 .event_log = log,
979 };
980
981 mbox_cmd = (struct cxl_mbox_cmd) {
982 .opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
983 .payload_in = payload,
984 .size_in = pl_size,
985 };
986
987 /*
988 * Clear Event Records uses u8 for the handle cnt while Get Event
989 * Record can return up to 0xffff records.
990 */
991 i = 0;
992 for (cnt = 0; cnt < total; cnt++) {
993 struct cxl_event_record_raw *raw = &get_pl->records[cnt];
994 struct cxl_event_generic *gen = &raw->event.generic;
995
996 payload->handles[i++] = gen->hdr.handle;
997 dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
998 le16_to_cpu(payload->handles[i - 1]));
999
1000 if (i == max_handles) {
1001 payload->nr_recs = i;
1002 rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1003 if (rc)
1004 goto free_pl;
1005 i = 0;
1006 }
1007 }
1008
1009 /* Clear what is left if any */
1010 if (i) {
1011 payload->nr_recs = i;
1012 mbox_cmd.size_in = struct_size(payload, handles, i);
1013 rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1014 if (rc)
1015 goto free_pl;
1016 }
1017
1018 free_pl:
1019 kvfree(payload);
1020 return rc;
1021 }
1022
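/*
 * Drain one event log: fetch records, emit trace events, and clear the
 * records, looping until the log reports empty.
 */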
1023 static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
1024 enum cxl_event_log_type type)
1025 {
1026 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1027 struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
1028 struct device *dev = mds->cxlds.dev;
1029 struct cxl_get_event_payload *payload;
1030 u8 log_type = type;
1031 u16 nr_rec;
1032
1033 mutex_lock(&mds->event.log_lock);
1034 payload = mds->event.buf;
1035
1036 do {
1037 int rc, i;
1038 struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
1039 .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
1040 .payload_in = &log_type,
1041 .size_in = sizeof(log_type),
1042 .payload_out = payload,
1043 .size_out = cxl_mbox->payload_size,
1044 .min_out = struct_size(payload, records, 0),
1045 };
1046
1047 rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1048 if (rc) {
1049 dev_err_ratelimited(dev,
1050 "Event log '%d': Failed to query event records : %d",
1051 type, rc);
1052 break;
1053 }
1054
1055 nr_rec = le16_to_cpu(payload->record_count);
1056 if (!nr_rec)
1057 break;
1058
1059 for (i = 0; i < nr_rec; i++)
1060 __cxl_event_trace_record(cxlmd, type,
1061 &payload->records[i]);
1062
1063 if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
1064 trace_cxl_overflow(cxlmd, type, payload);
1065
1066 rc = cxl_clear_event_record(mds, type, payload);
1067 if (rc) {
1068 dev_err_ratelimited(dev,
1069 "Event log '%d': Failed to clear events : %d",
1070 type, rc);
1071 break;
1072 }
1073 } while (nr_rec);
1074
1075 mutex_unlock(&mds->event.log_lock);
1076 }
1077
1078 /**
1079 * cxl_mem_get_event_records - Get Event Records from the device
1080 * @mds: The driver data for the operation
1081 * @status: Event Status register value identifying which events are available.
1082 *
1083 * Retrieve all event records available on the device, report them as trace
1084 * events, and clear them.
1085 *
1086 * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
1087 * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
1088 */
1089 void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
1090 {
1091 dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status);
1092
1093 if (status & CXLDEV_EVENT_STATUS_FATAL)
1094 cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL);
1095 if (status & CXLDEV_EVENT_STATUS_FAIL)
1096 cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL);
1097 if (status & CXLDEV_EVENT_STATUS_WARN)
1098 cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN);
1099 if (status & CXLDEV_EVENT_STATUS_INFO)
1100 cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
1101 }
1102 EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, "CXL");
1103
1104 /**
1105 * cxl_mem_get_partition_info - Get partition info
1106 * @mds: The driver data for the operation
1107 *
1108 * Retrieve the current partition info for the device specified. The active
1109 * values are the current capacity in bytes. If not 0, the 'next' values are
1110  * the pending values, in bytes, which take effect on the next cold reset.
1111 *
1112  * Return: 0 if no error, or the result of the mailbox command.
1113 *
1114 * See CXL @8.2.9.5.2.1 Get Partition Info
1115 */
1116 static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
1117 {
1118 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1119 struct cxl_mbox_get_partition_info pi;
1120 struct cxl_mbox_cmd mbox_cmd;
1121 int rc;
1122
1123 mbox_cmd = (struct cxl_mbox_cmd) {
1124 .opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
1125 .size_out = sizeof(pi),
1126 .payload_out = &pi,
1127 };
1128 rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1129 if (rc)
1130 return rc;
1131
1132 mds->active_volatile_bytes =
1133 le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
1134 mds->active_persistent_bytes =
1135 le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
1136
1137 return 0;
1138 }
1139
1140 /**
1141 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
1142 * @mds: The driver data for the operation
1143 *
1144 * Return: 0 if identify was executed successfully or media not ready.
1145 *
1146 * This will dispatch the identify command to the device and on success populate
1147 * structures to be exported to sysfs.
1148 */
1149 int cxl_dev_state_identify(struct cxl_memdev_state *mds)
1150 {
1151 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1152 /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
1153 struct cxl_mbox_identify id;
1154 struct cxl_mbox_cmd mbox_cmd;
1155 u32 val;
1156 int rc;
1157
1158 if (!mds->cxlds.media_ready)
1159 return 0;
1160
1161 mbox_cmd = (struct cxl_mbox_cmd) {
1162 .opcode = CXL_MBOX_OP_IDENTIFY,
1163 .size_out = sizeof(id),
1164 .payload_out = &id,
1165 };
1166 rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1167 if (rc < 0)
1168 return rc;
1169
1170 mds->total_bytes =
1171 le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
1172 mds->volatile_only_bytes =
1173 le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
1174 mds->persistent_only_bytes =
1175 le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
1176 mds->partition_align_bytes =
1177 le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;
1178
1179 mds->lsa_size = le32_to_cpu(id.lsa_size);
1180 memcpy(mds->firmware_version, id.fw_revision,
1181 sizeof(id.fw_revision));
1182
1183 if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) {
1184 val = get_unaligned_le24(id.poison_list_max_mer);
1185 mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
1186 }
1187
1188 return 0;
1189 }
1190 EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, "CXL");
1191
1192 static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
1193 {
1194 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1195 int rc;
1196 u32 sec_out = 0;
1197 struct cxl_get_security_output {
1198 __le32 flags;
1199 } out;
1200 struct cxl_mbox_cmd sec_cmd = {
1201 .opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
1202 .payload_out = &out,
1203 .size_out = sizeof(out),
1204 };
1205 struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };
1206
1207 if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
1208 return -EINVAL;
1209
1210 rc = cxl_internal_send_cmd(cxl_mbox, &sec_cmd);
1211 if (rc < 0) {
1212 dev_err(cxl_mbox->host, "Failed to get security state : %d", rc);
1213 return rc;
1214 }
1215
1216 /*
1217 * Prior to using these commands, any security applied to
1218 * the user data areas of the device shall be DISABLED (or
1219 	 * UNLOCKED for the secure erase case).
1220 */
1221 sec_out = le32_to_cpu(out.flags);
1222 if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
1223 return -EINVAL;
1224
1225 if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
1226 sec_out & CXL_PMEM_SEC_STATE_LOCKED)
1227 return -EINVAL;
1228
1229 rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1230 if (rc < 0) {
1231 dev_err(cxl_mbox->host, "Failed to sanitize device : %d", rc);
1232 return rc;
1233 }
1234
1235 return 0;
1236 }
1237
1238
1239 /**
1240 * cxl_mem_sanitize() - Send a sanitization command to the device.
1241 * @cxlmd: The device for the operation
1242 * @cmd: The specific sanitization command opcode
1243 *
1244 * Return: 0 if the command was executed successfully, regardless of
1245 * whether or not the actual security operation is done in the background,
1246 * such as for the Sanitize case.
1247 * Error return values can be the result of the mailbox command, -EINVAL
1248 * when security requirements are not met or invalid contexts, or -EBUSY
1249 * if the sanitize operation is already in flight.
1250 *
1251 * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
1252 */
1253 int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
1254 {
1255 struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
1256 struct cxl_port *endpoint;
1257
1258 /* synchronize with cxl_mem_probe() and decoder write operations */
1259 guard(device)(&cxlmd->dev);
1260 endpoint = cxlmd->endpoint;
1261 guard(rwsem_read)(&cxl_region_rwsem);
1262 /*
1263 	 * Require an endpoint to be safe; otherwise the driver cannot
1264 	 * be sure that the device is unmapped.
1265 */
1266 if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
1267 return __cxl_mem_sanitize(mds, cmd);
1268
1269 return -EBUSY;
1270 }
1271
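/* Append a non-empty DPA partition (RAM or PMEM) to the device partition info */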
1272 static void add_part(struct cxl_dpa_info *info, u64 start, u64 size, enum cxl_partition_mode mode)
1273 {
1274 int i = info->nr_partitions;
1275
1276 if (size == 0)
1277 return;
1278
1279 info->part[i].range = (struct range) {
1280 .start = start,
1281 .end = start + size - 1,
1282 };
1283 info->part[i].mode = mode;
1284 info->nr_partitions++;
1285 }
1286
1287 int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info)
1288 {
1289 struct cxl_dev_state *cxlds = &mds->cxlds;
1290 struct device *dev = cxlds->dev;
1291 int rc;
1292
1293 if (!cxlds->media_ready) {
1294 info->size = 0;
1295 return 0;
1296 }
1297
1298 info->size = mds->total_bytes;
1299
1300 if (mds->partition_align_bytes == 0) {
1301 add_part(info, 0, mds->volatile_only_bytes, CXL_PARTMODE_RAM);
1302 add_part(info, mds->volatile_only_bytes,
1303 mds->persistent_only_bytes, CXL_PARTMODE_PMEM);
1304 return 0;
1305 }
1306
1307 rc = cxl_mem_get_partition_info(mds);
1308 if (rc) {
1309 dev_err(dev, "Failed to query partition information\n");
1310 return rc;
1311 }
1312
1313 add_part(info, 0, mds->active_volatile_bytes, CXL_PARTMODE_RAM);
1314 add_part(info, mds->active_volatile_bytes, mds->active_persistent_bytes,
1315 CXL_PARTMODE_PMEM);
1316
1317 return 0;
1318 }
1319 EXPORT_SYMBOL_NS_GPL(cxl_mem_dpa_fetch, "CXL");
1320
1321 int cxl_get_dirty_count(struct cxl_memdev_state *mds, u32 *count)
1322 {
1323 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1324 struct cxl_mbox_get_health_info_out hi;
1325 struct cxl_mbox_cmd mbox_cmd;
1326 int rc;
1327
1328 mbox_cmd = (struct cxl_mbox_cmd) {
1329 .opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
1330 .size_out = sizeof(hi),
1331 .payload_out = &hi,
1332 };
1333
1334 rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1335 if (!rc)
1336 *count = le32_to_cpu(hi.dirty_shutdown_cnt);
1337
1338 return rc;
1339 }
1340 EXPORT_SYMBOL_NS_GPL(cxl_get_dirty_count, "CXL");
1341
1342 int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds)
1343 {
1344 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1345 struct cxl_mbox_cmd mbox_cmd;
1346 struct cxl_mbox_set_shutdown_state_in in = {
1347 .state = 1
1348 };
1349
1350 mbox_cmd = (struct cxl_mbox_cmd) {
1351 .opcode = CXL_MBOX_OP_SET_SHUTDOWN_STATE,
1352 .size_in = sizeof(in),
1353 .payload_in = &in,
1354 };
1355
1356 return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1357 }
1358 EXPORT_SYMBOL_NS_GPL(cxl_arm_dirty_shutdown, "CXL");
1359
1360 int cxl_set_timestamp(struct cxl_memdev_state *mds)
1361 {
1362 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1363 struct cxl_mbox_cmd mbox_cmd;
1364 struct cxl_mbox_set_timestamp_in pi;
1365 int rc;
1366
1367 pi.timestamp = cpu_to_le64(ktime_get_real_ns());
1368 mbox_cmd = (struct cxl_mbox_cmd) {
1369 .opcode = CXL_MBOX_OP_SET_TIMESTAMP,
1370 .size_in = sizeof(pi),
1371 .payload_in = &pi,
1372 };
1373
1374 rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1375 /*
1376 * Command is optional. Devices may have another way of providing
1377 * a timestamp, or may return all 0s in timestamp fields.
1378 	 * Don't report an error if this command isn't supported.
1379 */
1380 if (rc && (mbox_cmd.return_code != CXL_MBOX_CMD_RC_UNSUPPORTED))
1381 return rc;
1382
1383 return 0;
1384 }
1385 EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, "CXL");
1386
1387 int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
1388 struct cxl_region *cxlr)
1389 {
1390 struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
1391 struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
1392 struct cxl_mbox_poison_out *po;
1393 struct cxl_mbox_poison_in pi;
1394 int nr_records = 0;
1395 int rc;
1396
1397 rc = mutex_lock_interruptible(&mds->poison.lock);
1398 if (rc)
1399 return rc;
1400
1401 po = mds->poison.list_out;
1402 pi.offset = cpu_to_le64(offset);
1403 pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
1404
1405 do {
1406 struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
1407 .opcode = CXL_MBOX_OP_GET_POISON,
1408 .size_in = sizeof(pi),
1409 .payload_in = &pi,
1410 .size_out = cxl_mbox->payload_size,
1411 .payload_out = po,
1412 .min_out = struct_size(po, record, 0),
1413 };
1414
1415 rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
1416 if (rc)
1417 break;
1418
1419 for (int i = 0; i < le16_to_cpu(po->count); i++)
1420 trace_cxl_poison(cxlmd, cxlr, &po->record[i],
1421 po->flags, po->overflow_ts,
1422 CXL_POISON_TRACE_LIST);
1423
1424 /* Protect against an uncleared _FLAG_MORE */
1425 nr_records = nr_records + le16_to_cpu(po->count);
1426 if (nr_records >= mds->poison.max_errors) {
1427 dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n",
1428 nr_records);
1429 break;
1430 }
1431 } while (po->flags & CXL_POISON_FLAG_MORE);
1432
1433 mutex_unlock(&mds->poison.lock);
1434 return rc;
1435 }
1436 EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, "CXL");
1437
1438 static void free_poison_buf(void *buf)
1439 {
1440 kvfree(buf);
1441 }
1442
1443 /* Get Poison List output buffer is protected by mds->poison.lock */
1444 static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
1445 {
1446 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
1447
1448 mds->poison.list_out = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
1449 if (!mds->poison.list_out)
1450 return -ENOMEM;
1451
1452 return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf,
1453 mds->poison.list_out);
1454 }
1455
1456 int cxl_poison_state_init(struct cxl_memdev_state *mds)
1457 {
1458 int rc;
1459
1460 if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds))
1461 return 0;
1462
1463 rc = cxl_poison_alloc_buf(mds);
1464 if (rc) {
1465 clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds);
1466 return rc;
1467 }
1468
1469 mutex_init(&mds->poison.lock);
1470 return 0;
1471 }
1472 EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, "CXL");
1473
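/* One-time mailbox context setup: record the host device and initialize locking */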
1474 int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host)
1475 {
1476 if (!cxl_mbox || !host)
1477 return -EINVAL;
1478
1479 cxl_mbox->host = host;
1480 mutex_init(&cxl_mbox->mbox_mutex);
1481 rcuwait_init(&cxl_mbox->mbox_wait);
1482
1483 return 0;
1484 }
1485 EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, "CXL");
1486
1487 struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
1488 {
1489 struct cxl_memdev_state *mds;
1490 int rc;
1491
1492 mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
1493 if (!mds) {
1494 dev_err(dev, "No memory available\n");
1495 return ERR_PTR(-ENOMEM);
1496 }
1497
1498 mutex_init(&mds->event.log_lock);
1499 mds->cxlds.dev = dev;
1500 mds->cxlds.reg_map.host = dev;
1501 mds->cxlds.cxl_mbox.host = dev;
1502 mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
1503 mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
1504
1505 rc = devm_cxl_register_mce_notifier(dev, &mds->mce_notifier);
1506 if (rc == -EOPNOTSUPP)
1507 dev_warn(dev, "CXL MCE unsupported\n");
1508 else if (rc)
1509 return ERR_PTR(rc);
1510
1511 return mds;
1512 }
1513 EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, "CXL");
1514
1515 void __init cxl_mbox_init(void)
1516 {
1517 struct dentry *mbox_debugfs;
1518
1519 mbox_debugfs = cxl_debugfs_create_dir("mbox");
1520 debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
1521 &cxl_raw_allow_all);
1522 }
1523