1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
3 #include <linux/fwctl.h>
4 #include <linux/device.h>
5 #include <cxl/mailbox.h>
6 #include <cxl/features.h>
7 #include <uapi/fwctl/cxl.h>
8 #include "cxl.h"
9 #include "core.h"
10 #include "cxlmem.h"
11
/*
 * All the features below are exclusive to the kernel: userspace Set Feature
 * requests against these UUIDs are rejected, and they are reported to
 * userspace as non-changeable (see cxlctl_set_feature() and
 * cxlctl_get_supported_features()).
 */
static const uuid_t cxl_exclusive_feats[] = {
	CXL_FEAT_PATROL_SCRUB_UUID,
	CXL_FEAT_ECS_UUID,
	CXL_FEAT_SPPR_UUID,
	CXL_FEAT_HPPR_UUID,
	CXL_FEAT_CACHELINE_SPARING_UUID,
	CXL_FEAT_ROW_SPARING_UUID,
	CXL_FEAT_BANK_SPARING_UUID,
	CXL_FEAT_RANK_SPARING_UUID,
};
23
is_cxl_feature_exclusive_by_uuid(const uuid_t * uuid)24 static bool is_cxl_feature_exclusive_by_uuid(const uuid_t *uuid)
25 {
26 for (int i = 0; i < ARRAY_SIZE(cxl_exclusive_feats); i++) {
27 if (uuid_equal(uuid, &cxl_exclusive_feats[i]))
28 return true;
29 }
30
31 return false;
32 }
33
is_cxl_feature_exclusive(struct cxl_feat_entry * entry)34 static bool is_cxl_feature_exclusive(struct cxl_feat_entry *entry)
35 {
36 return is_cxl_feature_exclusive_by_uuid(&entry->uuid);
37 }
38
/* Return the features context attached to the device state (may be NULL) */
inline struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
{
	return cxlds->cxlfs;
}
EXPORT_SYMBOL_NS_GPL(to_cxlfs, "CXL");
44
/*
 * Query how many features the device supports by issuing a Get Supported
 * Features command sized to return just the response header, which carries
 * the total supported-features count but no entries.
 *
 * Return: number of supported features, or -errno on mailbox failure.
 */
static int cxl_get_supported_features_count(struct cxl_mailbox *cxl_mbox)
{
	struct cxl_mbox_get_sup_feats_out mbox_out;
	struct cxl_mbox_get_sup_feats_in mbox_in;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	memset(&mbox_in, 0, sizeof(mbox_in));
	/* Request only header-sized output so no entries are returned */
	mbox_in.count = cpu_to_le32(sizeof(mbox_out));
	memset(&mbox_out, 0, sizeof(mbox_out));
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
		.size_in = sizeof(mbox_in),
		.payload_in = &mbox_in,
		.size_out = sizeof(mbox_out),
		.payload_out = &mbox_out,
		.min_out = sizeof(mbox_out),
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0)
		return rc;

	return le16_to_cpu(mbox_out.supported_feats);
}
69
/*
 * Retrieve and cache every feature entry the device reports, batching
 * Get Supported Features requests so each response fits the mailbox
 * payload. Also tallies the features that are not kernel-exclusive
 * (i.e. usable from userspace) into num_user_features.
 *
 * Return: newly allocated cxl_feat_entries (caller owns, free with
 * kvfree()), or NULL on any failure.
 */
static struct cxl_feat_entries *
get_supported_features(struct cxl_features_state *cxlfs)
{
	int remain_feats, max_size, max_feats, start, rc, hdr_size;
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	int feat_size = sizeof(struct cxl_feat_entry);
	struct cxl_mbox_get_sup_feats_in mbox_in;
	struct cxl_feat_entry *entry;
	struct cxl_mbox_cmd mbox_cmd;
	int user_feats = 0;
	int count;

	count = cxl_get_supported_features_count(cxl_mbox);
	if (count <= 0)
		return NULL;

	/* Destination table sized for the full feature count */
	struct cxl_feat_entries *entries __free(kvfree) =
		kvmalloc(struct_size(entries, ent, count), GFP_KERNEL);
	if (!entries)
		return NULL;

	/* Scratch buffer for one mailbox response (header + entries) */
	struct cxl_mbox_get_sup_feats_out *mbox_out __free(kvfree) =
		kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!mbox_out)
		return NULL;

	hdr_size = struct_size(mbox_out, ents, 0);
	max_size = cxl_mbox->payload_size - hdr_size;
	/* max feat entries that can fit in mailbox max payload size */
	max_feats = max_size / feat_size;
	entry = entries->ent;

	start = 0;
	remain_feats = count;
	do {
		int retrieved, alloc_size, copy_feats;
		int num_entries;

		/* Clamp this batch to what fits in a single payload */
		if (remain_feats > max_feats) {
			alloc_size = struct_size(mbox_out, ents, max_feats);
			remain_feats = remain_feats - max_feats;
			copy_feats = max_feats;
		} else {
			alloc_size = struct_size(mbox_out, ents, remain_feats);
			copy_feats = remain_feats;
			remain_feats = 0;
		}

		memset(&mbox_in, 0, sizeof(mbox_in));
		mbox_in.count = cpu_to_le32(alloc_size);
		mbox_in.start_idx = cpu_to_le16(start);
		memset(mbox_out, 0, alloc_size);
		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
			.size_in = sizeof(mbox_in),
			.payload_in = &mbox_in,
			.size_out = alloc_size,
			.payload_out = mbox_out,
			.min_out = hdr_size,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0)
			return NULL;

		/* Header-only response means the device returned no entries */
		if (mbox_cmd.size_out <= hdr_size)
			return NULL;

		/*
		 * Make sure retrieved out buffer is multiple of feature
		 * entries.
		 */
		retrieved = mbox_cmd.size_out - hdr_size;
		if (retrieved % feat_size)
			return NULL;

		num_entries = le16_to_cpu(mbox_out->num_entries);
		/*
		 * If the reported output entries * defined entry size !=
		 * retrieved output bytes, then the output package is incorrect.
		 */
		if (num_entries * feat_size != retrieved)
			return NULL;

		memcpy(entry, mbox_out->ents, retrieved);
		/* Count entries userspace is allowed to operate on */
		for (int i = 0; i < num_entries; i++) {
			if (!is_cxl_feature_exclusive(entry + i))
				user_feats++;
		}
		entry += num_entries;
		/*
		 * If the number of output entries is less than expected, add the
		 * remaining entries to the next batch.
		 */
		remain_feats += copy_feats - num_entries;
		start += num_entries;
	} while (remain_feats);

	entries->num_features = count;
	entries->num_user_features = user_feats;

	return no_free_ptr(entries);
}
172
/* devm action: tear down the features context built by devm_cxl_setup_features() */
static void free_cxlfs(void *_cxlfs)
{
	struct cxl_features_state *cxlfs = _cxlfs;
	struct cxl_dev_state *cxlds = cxlfs->cxlds;

	/* Detach from the device state before freeing the context */
	cxlds->cxlfs = NULL;
	kvfree(cxlfs->entries);
	kfree(cxlfs);
}
182
/**
 * devm_cxl_setup_features() - Allocate and initialize features context
 * @cxlds: CXL device context
 *
 * Queries the device for its supported features, caches them in a
 * cxl_features_state hung off @cxlds, and registers a devm action
 * (free_cxlfs()) that tears everything down on driver detach.
 *
 * Return 0 on success or -errno on failure.
 */
int devm_cxl_setup_features(struct cxl_dev_state *cxlds)
{
	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;

	/* Nothing to do if the mailbox lacks even read-only feature support */
	if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
		return -ENODEV;

	struct cxl_features_state *cxlfs __free(kfree) =
		kzalloc(sizeof(*cxlfs), GFP_KERNEL);
	if (!cxlfs)
		return -ENOMEM;

	cxlfs->cxlds = cxlds;

	cxlfs->entries = get_supported_features(cxlfs);
	if (!cxlfs->entries)
		return -ENOMEM;

	cxlds->cxlfs = cxlfs;

	/* On action-registration failure free_cxlfs() runs immediately */
	return devm_add_action_or_reset(cxlds->dev, free_cxlfs, no_free_ptr(cxlfs));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_features, "CXL");
212
/*
 * cxl_get_feature() - Read feature data via the Get Feature mailbox command
 * @cxl_mbox: mailbox context
 * @feat_uuid: UUID of the feature to read
 * @selection: cxl_get_feat_selection value choosing which data to retrieve
 * @feat_out: destination buffer
 * @feat_out_size: number of bytes to read into @feat_out
 * @offset: starting byte offset within the feature data
 * @return_code: optional output for the mailbox return code
 *
 * Transfers larger than the mailbox payload are split into multiple
 * Get Feature commands, advancing the offset each iteration.
 *
 * Return: total bytes received, or 0 on failure (with *@return_code set
 * when @return_code is non-NULL).
 */
size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
		       enum cxl_get_feat_selection selection,
		       void *feat_out, size_t feat_out_size, u16 offset,
		       u16 *return_code)
{
	size_t data_to_rd_size, size_out;
	struct cxl_mbox_get_feat_in pi;
	struct cxl_mbox_cmd mbox_cmd;
	size_t data_rcvd_size = 0;
	int rc;

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_INPUT;

	if (!feat_out || !feat_out_size)
		return 0;

	size_out = min(feat_out_size, cxl_mbox->payload_size);
	uuid_copy(&pi.uuid, feat_uuid);
	pi.selection = selection;
	do {
		/* Per-iteration chunk limited by mailbox payload capacity */
		data_to_rd_size = min(feat_out_size - data_rcvd_size,
				      cxl_mbox->payload_size);
		pi.offset = cpu_to_le16(offset + data_rcvd_size);
		pi.count = cpu_to_le16(data_to_rd_size);

		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_FEATURE,
			.size_in = sizeof(pi),
			.payload_in = &pi,
			.size_out = size_out,
			.payload_out = feat_out + data_rcvd_size,
			.min_out = data_to_rd_size,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		/* Zero-length output with success would loop forever; bail */
		if (rc < 0 || !mbox_cmd.size_out) {
			if (return_code)
				*return_code = mbox_cmd.return_code;
			return 0;
		}
		data_rcvd_size += mbox_cmd.size_out;
	} while (data_rcvd_size < feat_out_size);

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_SUCCESS;

	return data_rcvd_size;
}
261
/*
 * FEAT_DATA_MIN_PAYLOAD_SIZE - minimum number of extra bytes, beyond the
 * Set Feature header, that must be available in the mailbox payload for
 * storing the actual feature data so that the data transfer works as
 * expected.
 */
267 #define FEAT_DATA_MIN_PAYLOAD_SIZE 10
/*
 * cxl_set_feature() - Write feature data via the Set Feature mailbox command
 * @cxl_mbox: mailbox context
 * @feat_uuid: UUID of the feature to modify
 * @feat_version: feature version placed in the command header
 * @feat_data: data to write
 * @feat_data_size: number of bytes in @feat_data
 * @feat_flag: caller flags; the data-transfer bits are overwritten here and
 *             the saved-across-reset flag is forced on
 * @offset: starting byte offset within the feature data
 * @return_code: optional output for the mailbox return code
 *
 * Data that fits in one mailbox payload is sent as a "full" transfer;
 * otherwise an initiate/continue/finish multi-part transfer is used.
 *
 * Return: 0 on success or -errno on failure.
 */
int cxl_set_feature(struct cxl_mailbox *cxl_mbox,
		    const uuid_t *feat_uuid, u8 feat_version,
		    const void *feat_data, size_t feat_data_size,
		    u32 feat_flag, u16 offset, u16 *return_code)
{
	size_t data_in_size, data_sent_size = 0;
	struct cxl_mbox_cmd mbox_cmd;
	size_t hdr_size;

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_INPUT;

	struct cxl_mbox_set_feat_in *pi __free(kfree) =
		kzalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!pi)
		return -ENOMEM;

	uuid_copy(&pi->uuid, feat_uuid);
	pi->version = feat_version;
	/* Driver owns the transfer-mode bits; force save-across-reset */
	feat_flag &= ~CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK;
	feat_flag |= CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET;
	hdr_size = sizeof(pi->hdr);
	/*
	 * Check minimum mbox payload size is available for
	 * the feature data transfer.
	 */
	if (hdr_size + FEAT_DATA_MIN_PAYLOAD_SIZE > cxl_mbox->payload_size)
		return -ENOMEM;

	/* Pick full transfer vs. the first leg of a multi-part transfer */
	if (hdr_size + feat_data_size <= cxl_mbox->payload_size) {
		pi->flags = cpu_to_le32(feat_flag |
					CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER);
		data_in_size = feat_data_size;
	} else {
		pi->flags = cpu_to_le32(feat_flag |
					CXL_SET_FEAT_FLAG_INITIATE_DATA_TRANSFER);
		data_in_size = cxl_mbox->payload_size - hdr_size;
	}

	do {
		int rc;

		pi->offset = cpu_to_le16(offset + data_sent_size);
		memcpy(pi->feat_data, feat_data + data_sent_size, data_in_size);
		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_SET_FEATURE,
			.size_in = hdr_size + data_in_size,
			.payload_in = pi,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0) {
			if (return_code)
				*return_code = mbox_cmd.return_code;
			return rc;
		}

		data_sent_size += data_in_size;
		if (data_sent_size >= feat_data_size) {
			if (return_code)
				*return_code = CXL_MBOX_CMD_RC_SUCCESS;
			return 0;
		}

		/* Last chunk fits: mark it as the finishing transfer */
		if ((feat_data_size - data_sent_size) <= (cxl_mbox->payload_size - hdr_size)) {
			data_in_size = feat_data_size - data_sent_size;
			pi->flags = cpu_to_le32(feat_flag |
						CXL_SET_FEAT_FLAG_FINISH_DATA_TRANSFER);
		} else {
			pi->flags = cpu_to_le32(feat_flag |
						CXL_SET_FEAT_FLAG_CONTINUE_DATA_TRANSFER);
		}
	} while (true);
}
341
342 /* FWCTL support */
343
/* Map a fwctl device back to its parent CXL memdev */
static inline struct cxl_memdev *fwctl_to_memdev(struct fwctl_device *fwctl_dev)
{
	return to_cxl_memdev(fwctl_dev->dev.parent);
}
348
/* No per-user-context state to initialize */
static int cxlctl_open_uctx(struct fwctl_uctx *uctx)
{
	return 0;
}
353
/* No per-user-context state to tear down */
static void cxlctl_close_uctx(struct fwctl_uctx *uctx)
{
}
357
358 static struct cxl_feat_entry *
get_support_feature_info(struct cxl_features_state * cxlfs,const struct fwctl_rpc_cxl * rpc_in)359 get_support_feature_info(struct cxl_features_state *cxlfs,
360 const struct fwctl_rpc_cxl *rpc_in)
361 {
362 struct cxl_feat_entry *feat;
363 const uuid_t *uuid;
364
365 if (rpc_in->op_size < sizeof(uuid))
366 return ERR_PTR(-EINVAL);
367
368 uuid = &rpc_in->set_feat_in.uuid;
369
370 for (int i = 0; i < cxlfs->entries->num_features; i++) {
371 feat = &cxlfs->entries->ent[i];
372 if (uuid_equal(uuid, &feat->uuid))
373 return feat;
374 }
375
376 return ERR_PTR(-EINVAL);
377 }
378
/*
 * Handle a Get Supported Features RPC from userspace. Entries are served
 * from the cached table rather than re-querying the device. Kernel-exclusive
 * features are still listed but are reported non-changeable with a zero
 * set_feat_size.
 *
 * Return: allocated fwctl_rpc_cxl_out (ownership passes to the caller) or
 * ERR_PTR() on failure; *@out_len is set on success.
 */
static void *cxlctl_get_supported_features(struct cxl_features_state *cxlfs,
					   const struct fwctl_rpc_cxl *rpc_in,
					   size_t *out_len)
{
	const struct cxl_mbox_get_sup_feats_in *feat_in;
	struct cxl_mbox_get_sup_feats_out *feat_out;
	struct cxl_feat_entry *pos;
	size_t out_size;
	int requested;
	u32 count;
	u16 start;
	int i;

	if (rpc_in->op_size != sizeof(*feat_in))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->get_sup_feats_in;
	count = le32_to_cpu(feat_in->count);
	start = le16_to_cpu(feat_in->start_idx);
	/* 'count' is in bytes; convert to a whole number of entries */
	requested = count / sizeof(*pos);

	/*
	 * Make sure that the total requested number of entries is not greater
	 * than the total number of supported features allowed for userspace.
	 */
	if (start >= cxlfs->entries->num_features)
		return ERR_PTR(-EINVAL);

	requested = min_t(int, requested, cxlfs->entries->num_features - start);

	out_size = sizeof(struct fwctl_rpc_cxl_out) +
		struct_size(feat_out, ents, requested);

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	rpc_out->size = struct_size(feat_out, ents, requested);
	feat_out = &rpc_out->get_sup_feats_out;
	/* Zero entries requested: report just the total feature count */
	if (requested == 0) {
		feat_out->num_entries = cpu_to_le16(requested);
		feat_out->supported_feats =
			cpu_to_le16(cxlfs->entries->num_features);
		rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
		*out_len = out_size;
		return no_free_ptr(rpc_out);
	}

	for (i = start, pos = &feat_out->ents[0];
	     i < cxlfs->entries->num_features; i++, pos++) {
		if (i - start == requested)
			break;

		memcpy(pos, &cxlfs->entries->ent[i], sizeof(*pos));
		/*
		 * If the feature is exclusive, set the set_feat_size to 0 to
		 * indicate that the feature is not changeable.
		 */
		if (is_cxl_feature_exclusive(pos)) {
			u32 flags;

			pos->set_feat_size = 0;
			flags = le32_to_cpu(pos->flags);
			flags &= ~CXL_FEATURE_F_CHANGEABLE;
			pos->flags = cpu_to_le32(flags);
		}
	}

	feat_out->num_entries = cpu_to_le16(requested);
	feat_out->supported_feats = cpu_to_le16(cxlfs->entries->num_features);
	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
	*out_len = out_size;

	return no_free_ptr(rpc_out);
}
455
/*
 * Handle a Get Feature RPC: forward the request to the device via
 * cxl_get_feature() and package either the data or the mailbox return
 * code for userspace.
 *
 * Return: allocated fwctl_rpc_cxl_out (ownership passes to the caller) or
 * ERR_PTR() on failure; *@out_len is updated to the bytes produced.
 */
static void *cxlctl_get_feature(struct cxl_features_state *cxlfs,
				const struct fwctl_rpc_cxl *rpc_in,
				size_t *out_len)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	const struct cxl_mbox_get_feat_in *feat_in;
	u16 offset, count, return_code;
	size_t out_size = *out_len;

	if (rpc_in->op_size != sizeof(*feat_in))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->get_feat_in;
	offset = le16_to_cpu(feat_in->offset);
	count = le16_to_cpu(feat_in->count);

	if (!count)
		return ERR_PTR(-EINVAL);

	/* Output buffer sized by the caller-provided *out_len */
	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	out_size = cxl_get_feature(cxl_mbox, &feat_in->uuid,
				   feat_in->selection, rpc_out->payload,
				   count, offset, &return_code);
	*out_len = sizeof(struct fwctl_rpc_cxl_out);
	/* On failure, hand the mailbox return code back to userspace */
	if (!out_size) {
		rpc_out->size = 0;
		rpc_out->retval = return_code;
		return no_free_ptr(rpc_out);
	}

	rpc_out->size = out_size;
	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
	*out_len += out_size;

	return no_free_ptr(rpc_out);
}
496
/*
 * Handle a Set Feature RPC: reject kernel-exclusive features, then forward
 * the request to the device via cxl_set_feature() and report the result
 * (mailbox return code on failure) back to userspace.
 *
 * Return: allocated fwctl_rpc_cxl_out (ownership passes to the caller) or
 * ERR_PTR() on failure; *@out_len is set on success.
 */
static void *cxlctl_set_feature(struct cxl_features_state *cxlfs,
				const struct fwctl_rpc_cxl *rpc_in,
				size_t *out_len)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	const struct cxl_mbox_set_feat_in *feat_in;
	size_t out_size, data_size;
	u16 offset, return_code;
	u32 flags;
	int rc;

	/* Must carry at least a header plus some feature data */
	if (rpc_in->op_size <= sizeof(feat_in->hdr))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->set_feat_in;

	/* Userspace may not modify kernel-exclusive features */
	if (is_cxl_feature_exclusive_by_uuid(&feat_in->uuid))
		return ERR_PTR(-EPERM);

	offset = le16_to_cpu(feat_in->offset);
	flags = le32_to_cpu(feat_in->flags);
	out_size = *out_len;

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	rpc_out->size = 0;

	data_size = rpc_in->op_size - sizeof(feat_in->hdr);
	rc = cxl_set_feature(cxl_mbox, &feat_in->uuid,
			     feat_in->version, feat_in->feat_data,
			     data_size, flags, offset, &return_code);
	*out_len = sizeof(*rpc_out);
	if (rc) {
		rpc_out->retval = return_code;
		return no_free_ptr(rpc_out);
	}

	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;

	return no_free_ptr(rpc_out);
}
541
/*
 * Decide whether a Set Feature request is permitted: the feature must be
 * known and changeable, its effects field must be fully understood by the
 * driver, and the caller's fwctl scope must be sufficient for the kind of
 * change (immediate change requires a higher scope than reset-required).
 */
static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs,
					 const struct fwctl_rpc_cxl *rpc_in,
					 enum fwctl_rpc_scope scope)
{
	u16 effects, imm_mask, reset_mask;
	struct cxl_feat_entry *feat;
	u32 flags;

	feat = get_support_feature_info(cxlfs, rpc_in);
	if (IS_ERR(feat))
		return false;

	/* Ensure that the attribute is changeable */
	flags = le32_to_cpu(feat->flags);
	if (!(flags & CXL_FEATURE_F_CHANGEABLE))
		return false;

	effects = le16_to_cpu(feat->effects);

	/*
	 * Reserved bits are set, rejecting since the effects is not
	 * comprehended by the driver.
	 */
	if (effects & CXL_CMD_EFFECTS_RESERVED) {
		dev_warn_once(cxlfs->cxlds->dev,
			      "Reserved bits set in the Feature effects field!\n");
		return false;
	}

	/* Currently no user background command support */
	if (effects & CXL_CMD_BACKGROUND)
		return false;

	/* Effects cause immediate change, highest security scope is needed */
	imm_mask = CXL_CMD_CONFIG_CHANGE_IMMEDIATE |
		   CXL_CMD_DATA_CHANGE_IMMEDIATE |
		   CXL_CMD_POLICY_CHANGE_IMMEDIATE |
		   CXL_CMD_LOG_CHANGE_IMMEDIATE;

	reset_mask = CXL_CMD_CONFIG_CHANGE_COLD_RESET |
		     CXL_CMD_CONFIG_CHANGE_CONV_RESET |
		     CXL_CMD_CONFIG_CHANGE_CXL_RESET;

	/* If no immediate or reset effect set, The hardware has a bug */
	if (!(effects & imm_mask) && !(effects & reset_mask))
		return false;

	/*
	 * If the Feature setting causes immediate configuration change
	 * then we need the full write permission policy.
	 */
	if (effects & imm_mask && scope >= FWCTL_RPC_DEBUG_WRITE_FULL)
		return true;

	/*
	 * If the Feature setting only causes configuration change
	 * after a reset, then the lesser level of write permission
	 * policy is ok.
	 */
	if (!(effects & imm_mask) && scope >= FWCTL_RPC_DEBUG_WRITE)
		return true;

	return false;
}
606
cxlctl_validate_hw_command(struct cxl_features_state * cxlfs,const struct fwctl_rpc_cxl * rpc_in,enum fwctl_rpc_scope scope,u16 opcode)607 static bool cxlctl_validate_hw_command(struct cxl_features_state *cxlfs,
608 const struct fwctl_rpc_cxl *rpc_in,
609 enum fwctl_rpc_scope scope,
610 u16 opcode)
611 {
612 struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
613
614 switch (opcode) {
615 case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
616 case CXL_MBOX_OP_GET_FEATURE:
617 if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
618 return false;
619 if (scope >= FWCTL_RPC_CONFIGURATION)
620 return true;
621 return false;
622 case CXL_MBOX_OP_SET_FEATURE:
623 if (cxl_mbox->feat_cap < CXL_FEATURES_RW)
624 return false;
625 return cxlctl_validate_set_features(cxlfs, rpc_in, scope);
626 default:
627 return false;
628 }
629 }
630
/* Dispatch an already-validated opcode to its feature RPC handler */
static void *cxlctl_handle_commands(struct cxl_features_state *cxlfs,
				    const struct fwctl_rpc_cxl *rpc_in,
				    size_t *out_len, u16 opcode)
{
	if (opcode == CXL_MBOX_OP_GET_SUPPORTED_FEATURES)
		return cxlctl_get_supported_features(cxlfs, rpc_in, out_len);

	if (opcode == CXL_MBOX_OP_GET_FEATURE)
		return cxlctl_get_feature(cxlfs, rpc_in, out_len);

	if (opcode == CXL_MBOX_OP_SET_FEATURE)
		return cxlctl_set_feature(cxlfs, rpc_in, out_len);

	return ERR_PTR(-EOPNOTSUPP);
}
646
/* fwctl RPC entry point: validate capability/scope, then dispatch */
static void *cxlctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
			   void *in, size_t in_len, size_t *out_len)
{
	struct fwctl_device *fwctl_dev = uctx->fwctl;
	struct cxl_memdev *cxlmd = fwctl_to_memdev(fwctl_dev);
	struct cxl_features_state *cxlfs = to_cxlfs(cxlmd->cxlds);
	const struct fwctl_rpc_cxl *rpc_in = in;
	u16 opcode = rpc_in->opcode;

	if (!cxlctl_validate_hw_command(cxlfs, rpc_in, scope, opcode))
		return ERR_PTR(-EINVAL);

	return cxlctl_handle_commands(cxlfs, rpc_in, out_len, opcode);
}
661
/* fwctl hooks backing the CXL memdev feature-control char device */
static const struct fwctl_ops cxlctl_ops = {
	.device_type = FWCTL_DEVICE_TYPE_CXL,
	.uctx_size = sizeof(struct fwctl_uctx),
	.open_uctx = cxlctl_open_uctx,
	.close_uctx = cxlctl_close_uctx,
	.fw_rpc = cxlctl_fw_rpc,
};
669
/* Scope-based cleanup: drop the fwctl device reference on scope exit */
DEFINE_FREE(free_fwctl_dev, struct fwctl_device *, if (_T) fwctl_put(_T))

/* devm action: unregister the fwctl device and drop the final reference */
static void free_memdev_fwctl(void *_fwctl_dev)
{
	struct fwctl_device *fwctl_dev = _fwctl_dev;

	fwctl_unregister(fwctl_dev);
	fwctl_put(fwctl_dev);
}
679
/**
 * devm_cxl_setup_fwctl() - Register a fwctl device for a CXL memdev
 * @host: device that owns the devm cleanup action
 * @cxlmd: CXL memory device to expose through fwctl
 *
 * Registration only happens when a features context exists and at least
 * one feature is available to userspace.
 *
 * Return 0 on success or -errno on failure.
 */
int devm_cxl_setup_fwctl(struct device *host, struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_features_state *cxlfs;
	int rc;

	cxlfs = to_cxlfs(cxlds);
	if (!cxlfs)
		return -ENODEV;

	/* No need to setup FWCTL if there are no user allowed features found */
	if (!cxlfs->entries->num_user_features)
		return -ENODEV;

	struct fwctl_device *fwctl_dev __free(free_fwctl_dev) =
		_fwctl_alloc_device(&cxlmd->dev, &cxlctl_ops, sizeof(*fwctl_dev));
	if (!fwctl_dev)
		return -ENOMEM;

	rc = fwctl_register(fwctl_dev);
	if (rc)
		return rc;

	/* free_memdev_fwctl() unregisters and drops the reference on detach */
	return devm_add_action_or_reset(host, free_memdev_fwctl,
					no_free_ptr(fwctl_dev));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fwctl, "CXL");
707
708 MODULE_IMPORT_NS("FWCTL");
709