1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * kernel/ksysfs.c - sysfs attributes in /sys/kernel, which
4 * are not related to any other subsystem
5 *
6 * Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org>
7 */
8
9 #include <asm/byteorder.h>
10 #include <linux/kobject.h>
11 #include <linux/ksysfs.h>
12 #include <linux/string.h>
13 #include <linux/sysfs.h>
14 #include <linux/export.h>
15 #include <linux/init.h>
16 #include <linux/vmcore_info.h>
17 #include <linux/profile.h>
18 #include <linux/stat.h>
19 #include <linux/sched.h>
20 #include <linux/capability.h>
21 #include <linux/compiler.h>
22
23 #include <linux/rcupdate.h> /* rcu_expedited and rcu_normal */
24
/* Resolve the CPU byte order to a string at compile time (see cpu_byteorder_show()). */
#if defined(__LITTLE_ENDIAN)
#define CPU_BYTEORDER_STRING "little"
#elif defined(__BIG_ENDIAN)
#define CPU_BYTEORDER_STRING "big"
#else
#error Unknown byteorder
#endif

/* Declare a read-only /sys/kernel attribute backed by <name>_show(). */
#define KERNEL_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

/* Declare a read-write /sys/kernel attribute backed by <name>_show()/<name>_store(). */
#define KERNEL_ATTR_RW(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
38
39 /* current uevent sequence number */
uevent_seqnum_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)40 static ssize_t uevent_seqnum_show(struct kobject *kobj,
41 struct kobj_attribute *attr, char *buf)
42 {
43 return sysfs_emit(buf, "%llu\n", (u64)atomic64_read(&uevent_seqnum));
44 }
45 KERNEL_ATTR_RO(uevent_seqnum);
46
47 /* cpu byteorder */
cpu_byteorder_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)48 static ssize_t cpu_byteorder_show(struct kobject *kobj,
49 struct kobj_attribute *attr, char *buf)
50 {
51 return sysfs_emit(buf, "%s\n", CPU_BYTEORDER_STRING);
52 }
53 KERNEL_ATTR_RO(cpu_byteorder);
54
55 /* address bits */
address_bits_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)56 static ssize_t address_bits_show(struct kobject *kobj,
57 struct kobj_attribute *attr, char *buf)
58 {
59 return sysfs_emit(buf, "%zu\n", sizeof(void *) * 8 /* CHAR_BIT */);
60 }
61 KERNEL_ATTR_RO(address_bits);
62
63 #ifdef CONFIG_UEVENT_HELPER
64 /* uevent helper program, used during early boot */
uevent_helper_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)65 static ssize_t uevent_helper_show(struct kobject *kobj,
66 struct kobj_attribute *attr, char *buf)
67 {
68 return sysfs_emit(buf, "%s\n", uevent_helper);
69 }
uevent_helper_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)70 static ssize_t uevent_helper_store(struct kobject *kobj,
71 struct kobj_attribute *attr,
72 const char *buf, size_t count)
73 {
74 if (count+1 > UEVENT_HELPER_PATH_LEN)
75 return -ENOENT;
76 memcpy(uevent_helper, buf, count);
77 uevent_helper[count] = '\0';
78 if (count && uevent_helper[count-1] == '\n')
79 uevent_helper[count-1] = '\0';
80 return count;
81 }
82 KERNEL_ATTR_RW(uevent_helper);
83 #endif
84
85 #ifdef CONFIG_PROFILING
profiling_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)86 static ssize_t profiling_show(struct kobject *kobj,
87 struct kobj_attribute *attr, char *buf)
88 {
89 return sysfs_emit(buf, "%d\n", prof_on);
90 }
/*
 * Enable kernel profiling at runtime.  The written string is handed to
 * profile_setup() verbatim (same syntax as the "profile=" boot option).
 * Profiling can only be enabled once: a second write returns -EEXIST.
 * Returns @count on success, a negative errno on failure.
 */
static ssize_t profiling_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	int ret;
	static DEFINE_MUTEX(lock);

	/*
	 * We need serialization, for profile_setup() initializes prof_on
	 * value and profile_init() must not reallocate prof_buffer after
	 * once allocated.
	 */
	guard(mutex)(&lock);	/* scope-based lock: released on every return below */
	if (prof_on)
		return -EEXIST;
	/*
	 * This eventually calls into get_option() which
	 * has a ton of callers and is not const. It is
	 * easiest to cast it away here.
	 */
	profile_setup((char *)buf);
	ret = profile_init();
	if (ret)
		return ret;
	ret = create_proc_profile();
	if (ret)
		return ret;
	return count;
}
120 KERNEL_ATTR_RW(profiling);
121 #endif
122
123 #ifdef CONFIG_VMCORE_INFO
124
vmcoreinfo_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)125 static ssize_t vmcoreinfo_show(struct kobject *kobj,
126 struct kobj_attribute *attr, char *buf)
127 {
128 phys_addr_t vmcore_base = paddr_vmcoreinfo_note();
129 return sysfs_emit(buf, "%pa %x\n", &vmcore_base,
130 (unsigned int)VMCOREINFO_NOTE_SIZE);
131 }
132 KERNEL_ATTR_RO(vmcoreinfo);
133
134 #endif /* CONFIG_VMCORE_INFO */
135
136 /* whether file capabilities are enabled */
fscaps_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)137 static ssize_t fscaps_show(struct kobject *kobj,
138 struct kobj_attribute *attr, char *buf)
139 {
140 return sysfs_emit(buf, "%d\n", file_caps_enabled);
141 }
142 KERNEL_ATTR_RO(fscaps);
143
144 #ifndef CONFIG_TINY_RCU
145 int rcu_expedited;
rcu_expedited_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)146 static ssize_t rcu_expedited_show(struct kobject *kobj,
147 struct kobj_attribute *attr, char *buf)
148 {
149 return sysfs_emit(buf, "%d\n", READ_ONCE(rcu_expedited));
150 }
rcu_expedited_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)151 static ssize_t rcu_expedited_store(struct kobject *kobj,
152 struct kobj_attribute *attr,
153 const char *buf, size_t count)
154 {
155 if (kstrtoint(buf, 0, &rcu_expedited))
156 return -EINVAL;
157
158 return count;
159 }
160 KERNEL_ATTR_RW(rcu_expedited);
161
162 int rcu_normal;
rcu_normal_show(struct kobject * kobj,struct kobj_attribute * attr,char * buf)163 static ssize_t rcu_normal_show(struct kobject *kobj,
164 struct kobj_attribute *attr, char *buf)
165 {
166 return sysfs_emit(buf, "%d\n", READ_ONCE(rcu_normal));
167 }
rcu_normal_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)168 static ssize_t rcu_normal_store(struct kobject *kobj,
169 struct kobj_attribute *attr,
170 const char *buf, size_t count)
171 {
172 if (kstrtoint(buf, 0, &rcu_normal))
173 return -EINVAL;
174
175 return count;
176 }
177 KERNEL_ATTR_RW(rcu_normal);
178 #endif /* #ifndef CONFIG_TINY_RCU */
179
/*
 * Make /sys/kernel/notes give the raw contents of our kernel .notes section.
 */
/* Section bounds provided by the linker script; only their addresses are used. */
extern const void __start_notes;
extern const void __stop_notes;
#define notes_size (&__stop_notes - &__start_notes)

/* Binary attribute for /sys/kernel/notes; size/private filled in at init. */
static __ro_after_init BIN_ATTR_SIMPLE_RO(notes);

/* The /sys/kernel kobject; parent for subsystem directories created later. */
struct kobject *kernel_kobj;
EXPORT_SYMBOL_GPL(kernel_kobj);

/* All plain-text attributes exposed under /sys/kernel (NULL-terminated). */
static struct attribute * kernel_attrs[] = {
	&fscaps_attr.attr,
	&uevent_seqnum_attr.attr,
	&cpu_byteorder_attr.attr,
	&address_bits_attr.attr,
#ifdef CONFIG_UEVENT_HELPER
	&uevent_helper_attr.attr,
#endif
#ifdef CONFIG_PROFILING
	&profiling_attr.attr,
#endif
#ifdef CONFIG_VMCORE_INFO
	&vmcoreinfo_attr.attr,
#endif
#ifndef CONFIG_TINY_RCU
	&rcu_expedited_attr.attr,
	&rcu_normal_attr.attr,
#endif
	NULL
};

/* Group so all attributes above are created/removed in one call. */
static const struct attribute_group kernel_attr_group = {
	.attrs = kernel_attrs,
};
216
/*
 * Create /sys/kernel, populate it with the attribute group above, and
 * (if the .notes section is non-empty) expose it as /sys/kernel/notes.
 * On any failure everything created so far is torn down and an error is
 * logged; the function itself returns void.
 */
void __init ksysfs_init(void)
{
	int error;

	kernel_kobj = kobject_create_and_add("kernel", NULL);
	if (!kernel_kobj) {
		error = -ENOMEM;
		goto exit;
	}
	error = sysfs_create_group(kernel_kobj, &kernel_attr_group);
	if (error)
		goto kset_exit;

	if (notes_size > 0) {
		bin_attr_notes.private = (void *)&__start_notes;
		bin_attr_notes.size = notes_size;
		error = sysfs_create_bin_file(kernel_kobj, &bin_attr_notes);
		if (error)
			goto group_exit;
	}

	return;

	/* Unwind in reverse order of creation. */
group_exit:
	sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
	kobject_put(kernel_kobj);	/* drops the last reference, frees the kobject */
exit:
	pr_err("failed to initialize the kernel kobject: %d\n", error);
}
247