// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

#define MEMORY_CLASS_NAME	"memory"

static const char *const online_type_to_str[] = {
	[MMOP_OFFLINE] = "offline",
	[MMOP_ONLINE] = "online",
	[MMOP_ONLINE_KERNEL] = "online_kernel",
	[MMOP_ONLINE_MOVABLE] = "online_movable",
};

int mhp_online_type_from_str(const char *str)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
		if (sysfs_streq(str, online_type_to_str[i]))
			return i;
	}
	return -EINVAL;
}

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
static int sections_per_block;

static inline unsigned long memory_block_id(unsigned long section_nr)
{
	return section_nr / sections_per_block;
}

static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
	return memory_block_id(pfn_to_section_nr(pfn));
}

static inline unsigned long phys_to_block_id(unsigned long phys)
{
	return pfn_to_block_id(PFN_DOWN(phys));
}

static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static const struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};

/*
 * Memory blocks are cached in a local radix tree to avoid
 * a costly linear search for the corresponding device on
 * the subsystem bus.
 */
static DEFINE_XARRAY(memory_blocks);

/*
 * Memory groups, indexed by memory group id (mgid).
 */
static DEFINE_XARRAY_FLAGS(memory_groups, XA_FLAGS_ALLOC);
#define MEMORY_GROUP_MARK_DYNAMIC	XA_MARK_1

static BLOCKING_NOTIFIER_HEAD(memory_chain);

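/*
 * Notifiers on this chain are invoked via memory_notify() for memory hotplug
 * state transitions (e.g. MEM_GOING_OFFLINE, MEM_ONLINE).
 */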
int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);

static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	/* Verify that the altmap is freed */
	WARN_ON(mem->altmap);
	kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);

/* Show the memory block ID, relative to the memory block size */
static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);

	return sysfs_emit(buf, "%08lx\n", memory_block_id(mem->start_section_nr));
}

/*
 * Legacy interface that we cannot remove. Always indicate "removable"
 * with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
 */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
}

/*
 * online, offline, going offline, etc.
 */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	const char *output;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		output = "online";
		break;
	case MEM_OFFLINE:
		output = "offline";
		break;
	case MEM_GOING_OFFLINE:
		output = "going-offline";
		break;
	default:
		WARN_ON(1);
		return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
	}

	return sysfs_emit(buf, "%s\n", output);
}

int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
static unsigned long memblk_nr_poison(struct memory_block *mem);
#else
static inline unsigned long memblk_nr_poison(struct memory_block *mem)
{
	return 0;
}
#endif

/*
 * Must acquire mem_hotplug_lock in write mode.
 */
static int memory_block_online(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = 0;
	struct memory_notify arg;
	struct zone *zone;
	int ret;

	if (memblk_nr_poison(mem))
		return -EHWPOISON;

	zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
				  start_pfn, nr_pages);

	/*
	 * Although vmemmap pages have a different lifecycle than the pages
	 * they describe (they remain until the memory is unplugged), doing
	 * their initialization and accounting at memory onlining/offlining
	 * stage helps to keep accounting easier to follow - e.g., vmemmap
	 * pages belong to the same zone as the memory they describe.
	 */
	if (mem->altmap)
		nr_vmemmap_pages = mem->altmap->free;

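	/*
	 * Pages used for the memory map itself (the altmap) are described to
	 * notifiers separately via altmap_start_pfn/altmap_nr_pages; the
	 * start_pfn/nr_pages range covers only the remaining pages.
	 */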
	arg.altmap_start_pfn = start_pfn;
	arg.altmap_nr_pages = nr_vmemmap_pages;
	arg.start_pfn = start_pfn + nr_vmemmap_pages;
	arg.nr_pages = nr_pages - nr_vmemmap_pages;
	mem_hotplug_begin();
	ret = memory_notify(MEM_PREPARE_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto out_notifier;

	if (nr_vmemmap_pages) {
		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages,
						zone, mem->altmap->inaccessible);
		if (ret)
			goto out;
	}

	ret = online_pages(start_pfn + nr_vmemmap_pages,
			   nr_pages - nr_vmemmap_pages, zone, mem->group);
	if (ret) {
		if (nr_vmemmap_pages)
			mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
		goto out;
	}

	/*
	 * Account once onlining succeeded. If the zone was unpopulated, it is
	 * now already properly populated.
	 */
	if (nr_vmemmap_pages)
		adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
					  nr_vmemmap_pages);

	mem->zone = zone;
	mem_hotplug_done();
	return ret;
out:
	memory_notify(MEM_FINISH_OFFLINE, &arg);
out_notifier:
	mem_hotplug_done();
	return ret;
}

/*
 * Must acquire mem_hotplug_lock in write mode.
 */
static int memory_block_offline(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = 0;
	struct memory_notify arg;
	int ret;

	if (!mem->zone)
		return -EINVAL;

	/*
	 * Unaccount before offlining, such that unpopulated zone and kthreads
	 * can properly be torn down in offline_pages().
	 */
	if (mem->altmap)
		nr_vmemmap_pages = mem->altmap->free;

	mem_hotplug_begin();
	if (nr_vmemmap_pages)
		adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
					  -nr_vmemmap_pages);

	ret = offline_pages(start_pfn + nr_vmemmap_pages,
			    nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
	if (ret) {
		/* offline_pages() failed. Account back. */
		if (nr_vmemmap_pages)
			adjust_present_page_count(pfn_to_page(start_pfn),
						  mem->group, nr_vmemmap_pages);
		goto out;
	}

	if (nr_vmemmap_pages)
		mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);

	mem->zone = NULL;
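	/* Tell notifiers which range (excluding any vmemmap pages) finished offlining. */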
	arg.altmap_start_pfn = start_pfn;
	arg.altmap_nr_pages = nr_vmemmap_pages;
	arg.start_pfn = start_pfn + nr_vmemmap_pages;
	arg.nr_pages = nr_pages - nr_vmemmap_pages;
	memory_notify(MEM_FINISH_OFFLINE, &arg);
out:
	mem_hotplug_done();
	return ret;
}

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(struct memory_block *mem, unsigned long action)
{
	int ret;

	switch (action) {
	case MEM_ONLINE:
		ret = memory_block_online(mem);
		break;
	case MEM_OFFLINE:
		ret = memory_block_offline(mem);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
		     "%ld\n", __func__, mem->start_section_nr, action, action);
		ret = -EINVAL;
	}

	return ret;
}

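/*
 * Transition a memory block from @from_state_req to @to_state. The block is
 * temporarily marked MEM_GOING_OFFLINE while offlining; on failure the
 * previous state is restored.
 */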
static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem, to_state);
	mem->state = ret ? from_state_req : to_state;

	return ret;
}

/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * When called via device_online() without configuring the online_type,
	 * we want to default to MMOP_ONLINE.
	 */
	if (mem->online_type == MMOP_OFFLINE)
		mem->online_type = MMOP_ONLINE;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
	mem->online_type = MMOP_OFFLINE;

	return ret;
}

static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

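/*
 * Handle writes to the per-block "state" attribute: "online", "online_kernel",
 * "online_movable" or "offline", serialized via the device hotplug lock.
 */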
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (online_type < 0)
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}

/*
 * Legacy interface that we cannot remove: s390x exposes the storage increment
 * covered by a memory block, allowing for identifying which memory blocks
 * comprise a storage increment. Since a memory block spans complete
 * storage increments nowadays, this interface is basically unused. Other
 * archs never exposed != 0.
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);

	return sysfs_emit(buf, "%d\n",
			  arch_get_memory_phys_device(start_pfn));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int print_allowed_zone(char *buf, int len, int nid,
			      struct memory_group *group,
			      unsigned long start_pfn, unsigned long nr_pages,
			      int online_type, struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
	if (zone == default_zone)
		return 0;

	return sysfs_emit_at(buf, len, " %s", zone->name);
}

static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct memory_group *group = mem->group;
	struct zone *default_zone;
	int nid = mem->nid;
	int len;

	/*
	 * Check the existing zone. Make sure that we do that only on the
	 * online nodes otherwise the page_zone is not reliable
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * If !mem->zone, the memory block spans multiple zones and
		 * cannot get offlined.
		 */
		return sysfs_emit(buf, "%s\n",
				  mem->zone ? mem->zone->name : "none");
	}

	default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
					  start_pfn, nr_pages);

	len = sysfs_emit(buf, "%s", default_zone->name);
	len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
				  MMOP_ONLINE_KERNEL, default_zone);
	len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
				  MMOP_ONLINE_MOVABLE, default_zone);
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR_RO(valid_zones);
#endif

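/* Per-memory-block attributes, exposed under /sys/devices/system/memory/memoryX/. */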
static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);

/*
 * Show the memory block size (shared by all memory blocks).
 */
static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", memory_block_size_bytes());
}

static DEVICE_ATTR_RO(block_size_bytes);

/*
 * Memory auto online policy.
 */

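/*
 * Exposed as /sys/devices/system/memory/auto_online_blocks; accepts the same
 * strings as the per-block "state" attribute.
 */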
static ssize_t auto_online_blocks_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  online_type_to_str[mhp_get_default_online_type()]);
}

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);

	if (online_type < 0)
		return -EINVAL;

	mhp_set_default_online_type(online_type);
	return count;
}

static DEVICE_ATTR_RW(auto_online_blocks);

#ifdef CONFIG_CRASH_HOTPLUG
#include <linux/kexec.h>
static ssize_t crash_hotplug_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", crash_check_hotplug_support());
}
static DEVICE_ATTR_RO(crash_hotplug);
#endif

/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace.  The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

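	/* The supplied physical address must be aligned to the memory block size. */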
	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = __add_memory(nid, phys_addr,
			   MIN_MEMORY_BLOCK_SIZE * sections_per_block,
			   MHP_NONE);

	if (ret)
		goto out;

	ret = count;
out:
	unlock_device_hotplug();
	return ret;
}

static DEVICE_ATTR_WO(probe);
#endif

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
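	/* The value written is a physical address in bytes; convert it to a PFN. */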
	pfn >>= PAGE_SHIFT;
	ret = soft_offline_page(pfn, 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, MF_SW_SIMULATED);
	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret ? ret : count;
}

static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif

/* See phys_device_show(). */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}

/*
 * A reference for the returned memory block device is acquired.
 *
 * Called under device_hotplug_lock.
 */
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
	struct memory_block *mem;

	mem = xa_load(&memory_blocks, block_id);
	if (mem)
		get_device(&mem->dev);
	return mem;
}

/*
 * Called under device_hotplug_lock.
 */
struct memory_block *find_memory_block(unsigned long section_nr)
{
	unsigned long block_id = memory_block_id(section_nr);

	return find_memory_block_by_id(block_id);
}

static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static const struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};

static int __add_memory_block(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret) {
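		/*
		 * device_register() failed: drop the reference we hold so the
		 * block is freed via memory_block_release().
		 */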
		put_device(&memory->dev);
		return ret;
	}
	ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
			      GFP_KERNEL));
	if (ret)
		device_unregister(&memory->dev);

	return ret;
}

static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
						     int nid)
{
	const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	const unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct zone *zone, *matching_zone = NULL;
	pg_data_t *pgdat = NODE_DATA(nid);
	int i;

	/*
	 * This logic only works for early memory, when the applicable zones
	 * already span the memory block. We don't expect overlapping zones on
	 * a single node for early memory. So if we're told that some PFNs
	 * of a node fall into this memory block, we can assume that all node
	 * zones that intersect with the memory block are actually applicable.
	 * No need to look at the memmap.
	 */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		zone = pgdat->node_zones + i;
		if (!populated_zone(zone))
			continue;
		if (!zone_intersects(zone, start_pfn, nr_pages))
			continue;
		if (!matching_zone) {
			matching_zone = zone;
			continue;
		}
		/* Spans multiple zones ... */
		matching_zone = NULL;
		break;
	}
	return matching_zone;
}

#ifdef CONFIG_NUMA
/**
 * memory_block_add_nid() - Indicate that system RAM falling into this memory
 *			    block device (partially) belongs to the given node.
 * @mem: The memory block device.
 * @nid: The node id.
 * @context: The memory initialization context.
 *
 * Indicate that system RAM falling into this memory block (partially) belongs
 * to the given node. If the context indicates ("early") that we are adding the
 * node during node device subsystem initialization, this will also properly
 * set/adjust mem->zone based on the zone ranges of the given node.
 */
void memory_block_add_nid(struct memory_block *mem, int nid,
			  enum meminit_context context)
{
	if (context == MEMINIT_EARLY && mem->nid != nid) {
		/*
		 * For early memory we have to determine the zone when setting
		 * the node id and handle multiple nodes spanning a single
		 * memory block by indicating via zone == NULL that we're not
		 * dealing with a single zone. So if we're setting the node id
		 * the first time, determine if there is a single zone. If we're
		 * setting the node id a second time to a different node,
		 * invalidate the single detected zone.
		 */
		if (mem->nid == NUMA_NO_NODE)
			mem->zone = early_node_zone_for_memory_block(mem, nid);
		else
			mem->zone = NULL;
	}

	/*
	 * If this memory block spans multiple nodes, we only indicate
	 * the last processed node. If we span multiple nodes (not applicable
	 * to hotplugged memory), zone == NULL will prohibit memory offlining
	 * and consequently unplug.
	 */
	mem->nid = nid;
}
#endif

static int add_memory_block(unsigned long block_id, unsigned long state,
			    struct vmem_altmap *altmap,
			    struct memory_group *group)
{
	struct memory_block *mem;
	int ret = 0;

	mem = find_memory_block_by_id(block_id);
	if (mem) {
		put_device(&mem->dev);
		return -EEXIST;
	}
	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->start_section_nr = block_id * sections_per_block;
	mem->state = state;
	mem->nid = NUMA_NO_NODE;
	mem->altmap = altmap;
	INIT_LIST_HEAD(&mem->group_next);

#ifndef CONFIG_NUMA
	if (state == MEM_ONLINE)
		/*
		 * MEM_ONLINE at this point implies early memory. With NUMA,
		 * we'll determine the zone when setting the node id via
		 * memory_block_add_nid(). Memory hotplug updates the zone
		 * manually when memory onlining/offlining succeeds.
		 */
		mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);
#endif /* CONFIG_NUMA */

	ret = __add_memory_block(mem);
	if (ret)
		return ret;

	if (group) {
		mem->group = group;
		list_add(&mem->group_next, &group->memory_blocks);
	}

	return 0;
}

static int add_hotplug_memory_block(unsigned long block_id,
				    struct vmem_altmap *altmap,
				    struct memory_group *group)
{
	return add_memory_block(block_id, MEM_OFFLINE, altmap, group);
}

static void remove_memory_block(struct memory_block *memory)
{
	if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
		return;

	WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);

	if (memory->group) {
		list_del(&memory->group_next);
		memory->group = NULL;
	}

	/* drop the ref. we got via find_memory_block() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}

/*
 * Create memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * will be initialized as offline.
 *
 * Called under device_hotplug_lock.
 */
int create_memory_block_devices(unsigned long start, unsigned long size,
				struct vmem_altmap *altmap,
				struct memory_group *group)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return -EINVAL;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		ret = add_hotplug_memory_block(block_id, altmap, group);
		if (ret)
			break;
	}
	if (ret) {
		end_block_id = block_id;
		for (block_id = start_block_id; block_id != end_block_id;
		     block_id++) {
			mem = find_memory_block_by_id(block_id);
			if (WARN_ON_ONCE(!mem))
				continue;
			remove_memory_block(mem);
		}
	}
	return ret;
}

/*
 * Remove memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * have to be offline.
 *
 * Called under device_hotplug_lock.
 */
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (WARN_ON_ONCE(!mem))
			continue;
		num_poisoned_pages_sub(-1UL, memblk_nr_poison(mem));
		unregister_memory_block_under_nodes(mem);
		remove_memory_block(mem);
	}
}

static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
#ifdef CONFIG_CRASH_HOTPLUG
	&dev_attr_crash_hotplug.attr,
#endif
	NULL
};

static const struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

/*
 * Initialize the sysfs support for memory devices. At the time this function
 * is called, we cannot have concurrent creation/deletion of memory block
 * devices, so the device_hotplug_lock is not needed.
 */
void __init memory_dev_init(void)
{
	int ret;
	unsigned long block_sz, block_id, nr;

	/* Validate the configured memory block size */
	block_sz = memory_block_size_bytes();
	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
		panic("Memory block size not suitable: 0x%lx\n", block_sz);
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create entries for memory sections that were found during boot
	 * and have been initialized. Use @block_id to track the last
	 * handled block and initialize it to an invalid value (ULONG_MAX)
	 * to bypass the block ID matching check for the first present
	 * block so that it can be covered.
	 */
	block_id = ULONG_MAX;
	for_each_present_section_nr(0, nr) {
		if (block_id != ULONG_MAX && memory_block_id(nr) == block_id)
			continue;

		block_id = memory_block_id(nr);
		ret = add_memory_block(block_id, MEM_ONLINE, NULL, NULL);
		if (ret) {
			panic("%s() failed to add memory block: %d\n",
			      __func__, ret);
		}
	}
}

/**
 * walk_memory_blocks - walk through all present memory blocks overlapped
 *			by the range [start, start + size)
 *
 * @start: start address of the memory range
 * @size: size of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory section walked
 *
 * This function walks through all present memory blocks overlapped by the
 * range [start, start + size), calling func on each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 *
 * Called under device_hotplug_lock.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, walk_memory_blocks_func_t func)
{
	const unsigned long start_block_id = phys_to_block_id(start);
	const unsigned long end_block_id = phys_to_block_id(start + size - 1);
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (!size)
		return 0;

	for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (!mem)
			continue;

		ret = func(mem, arg);
		put_device(&mem->dev);
		if (ret)
			break;
	}
	return ret;
}

struct for_each_memory_block_cb_data {
	walk_memory_blocks_func_t func;
	void *arg;
};

static int for_each_memory_block_cb(struct device *dev, void *data)
{
	struct memory_block *mem = to_memory_block(dev);
	struct for_each_memory_block_cb_data *cb_data = data;

	return cb_data->func(mem, cb_data->arg);
}

/**
 * for_each_memory_block - walk through all present memory blocks
 *
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks, calling func on
 * each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
{
	struct for_each_memory_block_cb_data cb_data = {
		.func = func,
		.arg = arg,
	};

	return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
				for_each_memory_block_cb);
}

/*
 * This is an internal helper to unify allocation and initialization of
 * memory groups. Note that the passed memory group will be copied to a
 * dynamically allocated memory group. After this call, the passed
 * memory group should no longer be used.
 */
static int memory_group_register(struct memory_group group)
{
	struct memory_group *new_group;
	uint32_t mgid;
	int ret;

	if (!node_possible(group.nid))
		return -EINVAL;

	new_group = kzalloc(sizeof(group), GFP_KERNEL);
	if (!new_group)
		return -ENOMEM;
	*new_group = group;
	INIT_LIST_HEAD(&new_group->memory_blocks);

	ret = xa_alloc(&memory_groups, &mgid, new_group, xa_limit_31b,
		       GFP_KERNEL);
	if (ret) {
		kfree(new_group);
		return ret;
	} else if (group.is_dynamic) {
		xa_set_mark(&memory_groups, mgid, MEMORY_GROUP_MARK_DYNAMIC);
	}
	return mgid;
}

/**
 * memory_group_register_static() - Register a static memory group.
 * @nid: The node id.
 * @max_pages: The maximum number of pages we'll have in this static memory
 *	       group.
 *
 * Register a new static memory group and return the memory group id.
 * All memory in the group belongs to a single unit, such as a DIMM. All
 * memory belonging to a static memory group is added in one go to be removed
 * in one go -- it's static.
 *
 * Returns an error if out of memory, if the node id is invalid, if no new
 * memory groups can be registered, or if max_pages is invalid (0). Otherwise,
 * returns the new memory group id.
 */
int memory_group_register_static(int nid, unsigned long max_pages)
{
	struct memory_group group = {
		.nid = nid,
		.s = {
			.max_pages = max_pages,
		},
	};

	if (!max_pages)
		return -EINVAL;
	return memory_group_register(group);
}
EXPORT_SYMBOL_GPL(memory_group_register_static);

/**
 * memory_group_register_dynamic() - Register a dynamic memory group.
 * @nid: The node id.
 * @unit_pages: Unit in pages in which memory is added/removed in this dynamic
 *		memory group.
 *
 * Register a new dynamic memory group and return the memory group id.
 * Memory within a dynamic memory group is added/removed dynamically
 * in unit_pages.
 *
 * Returns an error if out of memory, if the node id is invalid, if no new
 * memory groups can be registered, or if unit_pages is invalid (0, not a
 * power of two, smaller than a single memory block). Otherwise, returns the
 * new memory group id.
 */
int memory_group_register_dynamic(int nid, unsigned long unit_pages)
{
	struct memory_group group = {
		.nid = nid,
		.is_dynamic = true,
		.d = {
			.unit_pages = unit_pages,
		},
	};

	if (!unit_pages || !is_power_of_2(unit_pages) ||
	    unit_pages < PHYS_PFN(memory_block_size_bytes()))
		return -EINVAL;
	return memory_group_register(group);
}
EXPORT_SYMBOL_GPL(memory_group_register_dynamic);

/**
 * memory_group_unregister() - Unregister a memory group.
 * @mgid: the memory group id
 *
 * Unregister a memory group. If any memory block still belongs to this
 * memory group, unregistering will fail.
 *
 * Returns -EINVAL if the memory group id is invalid, returns -EBUSY if some
 * memory blocks still belong to this memory group and returns 0 if
 * unregistering succeeded.
 */
int memory_group_unregister(int mgid)
{
	struct memory_group *group;

	if (mgid < 0)
		return -EINVAL;

	group = xa_load(&memory_groups, mgid);
	if (!group)
		return -EINVAL;
	if (!list_empty(&group->memory_blocks))
		return -EBUSY;
	xa_erase(&memory_groups, mgid);
	kfree(group);
	return 0;
}
EXPORT_SYMBOL_GPL(memory_group_unregister);

/*
 * This is an internal helper only to be used in core memory hotplug code to
 * lookup a memory group. We don't care about locking, as we don't expect a
 * memory group to get unregistered while adding memory to it -- because
 * the group and the memory is managed by the same driver.
 */
struct memory_group *memory_group_find_by_id(int mgid)
{
	return xa_load(&memory_groups, mgid);
}

/*
 * This is an internal helper only to be used in core memory hotplug code to
 * walk all dynamic memory groups excluding a given memory group, either
 * belonging to a specific node, or belonging to any node.
 */
int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
			       struct memory_group *excluded, void *arg)
{
	struct memory_group *group;
	unsigned long index;
	int ret = 0;

	xa_for_each_marked(&memory_groups, index, group,
			   MEMORY_GROUP_MARK_DYNAMIC) {
		if (group == excluded)
			continue;
#ifdef CONFIG_NUMA
		if (nid != NUMA_NO_NODE && group->nid != nid)
			continue;
#endif /* CONFIG_NUMA */
		ret = func(group, arg);
		if (ret)
			break;
	}
	return ret;
}

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
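/*
 * Per-memory-block count of hwpoisoned pages; a block with poisoned pages is
 * refused onlining in memory_block_online() (-EHWPOISON).
 */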
void memblk_nr_poison_inc(unsigned long pfn)
{
	const unsigned long block_id = pfn_to_block_id(pfn);
	struct memory_block *mem = find_memory_block_by_id(block_id);

	if (mem)
		atomic_long_inc(&mem->nr_hwpoison);
}

void memblk_nr_poison_sub(unsigned long pfn, long i)
{
	const unsigned long block_id = pfn_to_block_id(pfn);
	struct memory_block *mem = find_memory_block_by_id(block_id);

	if (mem)
		atomic_long_sub(i, &mem->nr_hwpoison);
}

static unsigned long memblk_nr_poison(struct memory_block *mem)
{
	return atomic_long_read(&mem->nr_hwpoison);
}
#endif