// SPDX-License-Identifier: GPL-2.0
/*
 * Sysfs interface for the NVMe core driver.
 *
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/nvme-auth.h>

#include "nvme.h"
#include "fabrics.h"

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf,
			  ctrl->passthru_err_log_enabled ? "on\n" : "off\n");
}

static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	bool passthru_err_log_enabled;
	int err;

	err = kstrtobool(buf, &passthru_err_log_enabled);
	if (err)
		return -EINVAL;

	ctrl->passthru_err_log_enabled = passthru_err_log_enabled;

	return count;
}

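/*
 * Resolve the nvme_ns_head backing @dev: the device can belong either to the
 * multipath ns_head gendisk or to a regular per-path namespace block device.
 */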
static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (nvme_disk_is_ns_head(disk))
		return disk->private_data;
	return nvme_get_ns_from_dev(dev)->head;
}

static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);

	return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n");
}

static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	bool passthru_err_log_enabled;
	int err;

	err = kstrtobool(buf, &passthru_err_log_enabled);
	if (err)
		return -EINVAL;
	head->passthru_err_log_enabled = passthru_err_log_enabled;

	return count;
}

static struct device_attribute dev_attr_adm_passthru_err_log_enabled = \
	__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
	nvme_adm_passthru_err_log_enabled_show, nvme_adm_passthru_err_log_enabled_store);

static struct device_attribute dev_attr_io_passthru_err_log_enabled = \
	__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
	nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store);

static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);

	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);

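	/*
	 * No unique namespace identifier is available; fall back to a WWID
	 * built from the vendor ID, serial number, model string and NSID,
	 * trimming trailing spaces and NUL padding first.
	 */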
	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);

static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
	if (uuid_is_null(&ids->uuid)) {
		dev_warn_once(dev,
			"No UUID available providing old NGUID\n");
		return sysfs_emit(buf, "%pU\n", ids->nguid);
	}
	return sysfs_emit(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);

static ssize_t csi_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ids.csi);
}
static DEVICE_ATTR_RO(csi);

static ssize_t metadata_bytes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ms);
}
static DEVICE_ATTR_RO(metadata_bytes);

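/*
 * Refresh the cached namespace utilization (nuse) for a multipath head by
 * issuing Identify Namespace on an available path, rate limited to avoid
 * flooding the controller with admin commands.
 */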
static int ns_head_update_nuse(struct nvme_ns_head *head)
{
	struct nvme_id_ns *id;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	/* Avoid issuing commands too often by rate limiting the update */
	if (!__ratelimit(&head->rs_nuse))
		return 0;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	ret = nvme_identify_ns(ns->ctrl, head->ns_id, &id);
	if (ret)
		goto out_unlock;

	head->nuse = le64_to_cpu(id->nuse);
	kfree(id);

out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static int ns_update_nuse(struct nvme_ns *ns)
{
	struct nvme_id_ns *id;
	int ret;

	/* Avoid issuing commands too often by rate limiting the update. */
	if (!__ratelimit(&ns->head->rs_nuse))
		return 0;

	ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id);
	if (ret)
		return ret;

	ns->head->nuse = le64_to_cpu(id->nuse);
	kfree(id);
	return 0;
}

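/*
 * nuse is tracked per ns_head: refresh it through the multipath head when
 * read via the ns_head disk, or through the path's own controller otherwise.
 */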
static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct gendisk *disk = dev_to_disk(dev);
	int ret;

	if (nvme_disk_is_ns_head(disk))
		ret = ns_head_update_nuse(head);
	else
		ret = ns_update_nuse(disk->private_data);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%llu\n", head->nuse);
}
static DEVICE_ATTR_RO(nuse);

static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_csi.attr,
	&dev_attr_nsid.attr,
	&dev_attr_metadata_bytes.attr,
	&dev_attr_nuse.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
	&dev_attr_queue_depth.attr,
	&dev_attr_numa_nodes.attr,
#endif
	&dev_attr_io_passthru_err_log_enabled.attr,
	NULL,
};

static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		/* per-path attr */
		if (nvme_disk_is_ns_head(dev_to_disk(dev)))
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
	if (a == &dev_attr_queue_depth.attr || a == &dev_attr_numa_nodes.attr) {
		if (nvme_disk_is_ns_head(dev_to_disk(dev)))
			return 0;
	}
#endif
	return a->mode;
}

static const struct attribute_group nvme_ns_attr_group = {
	.attrs		= nvme_ns_attrs,
	.is_visible	= nvme_ns_attrs_are_visible,
};

#ifdef CONFIG_NVME_MULTIPATH
/*
 * NOTE: The dummy attribute does not appear in sysfs. It exists solely to allow
 * control over the visibility of the multipath sysfs node. Without at least one
 * attribute defined in nvme_ns_mpath_attrs[], the sysfs implementation does not
 * invoke the multipath_sysfs_group_visible() method. As a result, we would not
 * be able to control the visibility of the multipath sysfs node.
 */
static struct attribute dummy_attr = {
	.name = "dummy",
};

static struct attribute *nvme_ns_mpath_attrs[] = {
	&dummy_attr,
	NULL,
};

static bool multipath_sysfs_group_visible(struct kobject *kobj)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	return nvme_disk_is_ns_head(dev_to_disk(dev));
}

static bool multipath_sysfs_attr_visible(struct kobject *kobj,
		struct attribute *attr, int n)
{
	return false;
}

DEFINE_SYSFS_GROUP_VISIBLE(multipath_sysfs)

const struct attribute_group nvme_ns_mpath_attr_group = {
	.name           = "multipath",
	.attrs		= nvme_ns_mpath_attrs,
	.is_visible     = SYSFS_GROUP_VISIBLE(multipath_sysfs),
};
#endif

const struct attribute_group *nvme_ns_attr_groups[] = {
	&nvme_ns_attr_group,
#ifdef CONFIG_NVME_MULTIPATH
	&nvme_ns_mpath_attr_group,
#endif
	NULL,
};

#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
        return sysfs_emit(buf, "%.*s\n",					\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);		\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
        return sysfs_emit(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
nvme_show_int_function(kato);

static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
		return -EBUSY;

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned state = (unsigned)nvme_ctrl_state(ctrl);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if (state < ARRAY_SIZE(state_name) && state_name[state])
		return sysfs_emit(buf, "%s\n", state_name[state]);

	return sysfs_emit(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

static ssize_t nvme_sysfs_show_hostid(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (ctrl->opts->max_reconnects == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n",
			  opts->max_reconnects * opts->reconnect_delay);
}

static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ctrl_loss_tmo, err;

	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
	if (err)
		return -EINVAL;

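	/* A negative value disables the timeout: keep reconnecting forever. */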
	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
	return count;
}
static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);

static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->reconnect_delay == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned int v;
	int err;

	err = kstrtou32(buf, 10, &v);
	if (err)
		return err;

	ctrl->opts->reconnect_delay = v;
	return count;
}
static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);

static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->fast_io_fail_tmo == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
}

static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int fast_io_fail_tmo, err;

	err = kstrtoint(buf, 10, &fast_io_fail_tmo);
	if (err)
		return -EINVAL;

	if (fast_io_fail_tmo < 0)
		opts->fast_io_fail_tmo = -1;
	else
		opts->fast_io_fail_tmo = fast_io_fail_tmo;
	return count;
}
static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);

static ssize_t cntrltype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_CTRL_IO] = "io\n",
		[NVME_CTRL_DISC] = "discovery\n",
		[NVME_CTRL_ADMIN] = "admin\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->cntrltype]);
}
static DEVICE_ATTR_RO(cntrltype);

static ssize_t dctype_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_DCTYPE_NOT_REPORTED] = "none\n",
		[NVME_DCTYPE_DDC] = "ddc\n",
		[NVME_DCTYPE_CDC] = "cdc\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->dctype]);
}
static DEVICE_ATTR_RO(dctype);

#ifdef CONFIG_NVME_HOST_AUTH
static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
}

static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_secret)) {
		struct nvme_dhchap_key *key, *host_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_secret);
		opts->dhchap_secret = dhchap_secret;
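		/*
		 * Swap in the newly generated host key under the auth mutex
		 * and free the old key outside the critical section.
		 */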
		host_key = ctrl->host_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->host_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(host_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);

static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_ctrl_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
}

static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_ctrl_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
		struct nvme_dhchap_key *key, *ctrl_key;
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_ctrl_secret);
		opts->dhchap_ctrl_secret = dhchap_secret;
		ctrl_key = ctrl->ctrl_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->ctrl_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(ctrl_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
#endif

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
	&dev_attr_hostnqn.attr,
	&dev_attr_hostid.attr,
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
	&dev_attr_fast_io_fail_tmo.attr,
	&dev_attr_kato.attr,
	&dev_attr_cntrltype.attr,
	&dev_attr_dctype.attr,
#ifdef CONFIG_NVME_HOST_AUTH
	&dev_attr_dhchap_secret.attr,
	&dev_attr_dhchap_ctrl_secret.attr,
#endif
	&dev_attr_adm_passthru_err_log_enabled.attr,
	NULL
};

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;
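	/*
	 * ctrl->opts is only set for fabrics controllers, so the fabrics
	 * connection attributes below are hidden for other transports.
	 */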
	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_hostid.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
		return 0;
#ifdef CONFIG_NVME_HOST_AUTH
	if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
		return 0;
#endif

	return a->mode;
}

const struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};
EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);

#ifdef CONFIG_NVME_TCP_TLS
static ssize_t tls_key_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (!ctrl->tls_pskid)
		return 0;
	return sysfs_emit(buf, "%08x\n", ctrl->tls_pskid);
}
static DEVICE_ATTR_RO(tls_key);

static ssize_t tls_configured_key_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct key *key = ctrl->opts->tls_key;

	return sysfs_emit(buf, "%08x\n", key_serial(key));
}
static DEVICE_ATTR_RO(tls_configured_key);

static ssize_t tls_keyring_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct key *keyring = ctrl->opts->keyring;

	return sysfs_emit(buf, "%s\n", keyring->description);
}
static DEVICE_ATTR_RO(tls_keyring);

static struct attribute *nvme_tls_attrs[] = {
	&dev_attr_tls_key.attr,
	&dev_attr_tls_configured_key.attr,
	&dev_attr_tls_keyring.attr,
	NULL,
};

static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

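	/* TLS is only supported on the TCP transport; hide the group elsewhere. */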
	if (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp"))
		return 0;

	if (a == &dev_attr_tls_key.attr &&
	    !ctrl->opts->tls && !ctrl->opts->concat)
		return 0;
	if (a == &dev_attr_tls_configured_key.attr &&
	    (!ctrl->opts->tls_key || ctrl->opts->concat))
		return 0;
	if (a == &dev_attr_tls_keyring.attr &&
	    !ctrl->opts->keyring)
		return 0;

	return a->mode;
}

static const struct attribute_group nvme_tls_attrs_group = {
	.attrs		= nvme_tls_attrs,
	.is_visible	= nvme_tls_attrs_are_visible,
};
#endif

const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
#ifdef CONFIG_NVME_TCP_TLS
	&nvme_tls_attrs_group,
#endif
	NULL,
};

#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

static ssize_t nvme_subsys_show_type(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	switch (subsys->subtype) {
	case NVME_NQN_DISC:
		return sysfs_emit(buf, "discovery\n");
	case NVME_NQN_NVME:
		return sysfs_emit(buf, "nvm\n");
	default:
		return sysfs_emit(buf, "reserved\n");
	}
}
static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);

#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
	return sysfs_emit(buf, "%.*s\n",				\
			   (int)sizeof(subsys->field), subsys->field);	\
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
	&subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
	NULL,
};

static const struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};