// SPDX-License-Identifier: GPL-2.0
/*
 * Disk events - monitor disk events like media change and eject request.
 */
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/blkdev.h>
#include "blk.h"

struct disk_events {
	struct list_head	node;		/* all disk_event's */
	struct gendisk		*disk;		/* the associated disk */
	spinlock_t		lock;

	struct mutex		block_mutex;	/* protects blocking */
	int			block;		/* event blocking depth */
	unsigned int		pending;	/* events already sent out */
	unsigned int		clearing;	/* events being cleared */

	long			poll_msecs;	/* interval, -1 for default */
	struct delayed_work	dwork;
};

static const char *disk_events_strs[] = {
	[ilog2(DISK_EVENT_MEDIA_CHANGE)]	= "media_change",
	[ilog2(DISK_EVENT_EJECT_REQUEST)]	= "eject_request",
};

static char *disk_uevents[] = {
	[ilog2(DISK_EVENT_MEDIA_CHANGE)]	= "DISK_MEDIA_CHANGE=1",
	[ilog2(DISK_EVENT_EJECT_REQUEST)]	= "DISK_EJECT_REQUEST=1",
};

/* list of all disk_events */
static DEFINE_MUTEX(disk_events_mutex);
static LIST_HEAD(disk_events);

/* disable in-kernel polling by default */
static unsigned long disk_events_dfl_poll_msecs;

static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
{
	struct disk_events *ev = disk->ev;
	long intv_msecs = 0;

	/*
	 * If device-specific poll interval is set, always use it.  If
	 * the default is being used, poll if the POLL flag is set.
	 */
	if (ev->poll_msecs >= 0)
		intv_msecs = ev->poll_msecs;
	else if (disk->event_flags & DISK_EVENT_FLAG_POLL)
		intv_msecs = disk_events_dfl_poll_msecs;

	return msecs_to_jiffies(intv_msecs);
}

/**
 * disk_block_events - block and flush disk event checking
 * @disk: disk to block events for
 *
 * On return from this function, it is guaranteed that event checking
 * isn't in progress and won't happen until unblocked by
 * disk_unblock_events().  Events blocking is counted and the actual
 * unblocking happens after the matching number of unblocks are done.
 *
 * Note that this intentionally does not block event checking from
 * disk_clear_events().
 *
 * CONTEXT:
 * Might sleep.
 */
void disk_block_events(struct gendisk *disk)
{
	struct disk_events *ev = disk->ev;
	unsigned long flags;
	bool cancel;

	if (!ev)
		return;

	/*
	 * Outer mutex ensures that the first blocker completes canceling
	 * the event work before further blockers are allowed to finish.
	 */
	mutex_lock(&ev->block_mutex);

	spin_lock_irqsave(&ev->lock, flags);
	cancel = !ev->block++;
	spin_unlock_irqrestore(&ev->lock, flags);

	if (cancel)
		cancel_delayed_work_sync(&disk->ev->dwork);

	mutex_unlock(&ev->block_mutex);
}

static void __disk_unblock_events(struct gendisk *disk, bool check_now)
{
	struct disk_events *ev = disk->ev;
	unsigned long intv;
	unsigned long flags;

	spin_lock_irqsave(&ev->lock, flags);

	if (WARN_ON_ONCE(ev->block <= 0))
		goto out_unlock;

	if (--ev->block)
		goto out_unlock;

	intv = disk_events_poll_jiffies(disk);
	if (check_now)
		queue_delayed_work(system_freezable_power_efficient_wq,
				&ev->dwork, 0);
	else if (intv)
		queue_delayed_work(system_freezable_power_efficient_wq,
				&ev->dwork, intv);
out_unlock:
	spin_unlock_irqrestore(&ev->lock, flags);
}

/**
 * disk_unblock_events - unblock disk event checking
 * @disk: disk to unblock events for
 *
 * Undo disk_block_events().  When the block count reaches zero, it
 * starts events polling if configured.
 *
 * CONTEXT:
 * Don't care.  Safe to call from irq context.
 */
void disk_unblock_events(struct gendisk *disk)
{
	if (disk->ev)
		__disk_unblock_events(disk, false);
}

/**
 * disk_flush_events - schedule immediate event checking and flushing
 * @disk: disk to check and flush events for
 * @mask: events to flush
 *
 * Schedule immediate event checking on @disk if not blocked.  Events in
 * @mask are scheduled to be cleared from the driver.  Note that this
 * doesn't clear the events from @disk->ev.
 *
 * CONTEXT:
 * If @mask is non-zero must be called with disk->open_mutex held.
 */
void disk_flush_events(struct gendisk *disk, unsigned int mask)
{
	struct disk_events *ev = disk->ev;

	if (!ev)
		return;

	spin_lock_irq(&ev->lock);
	ev->clearing |= mask;
	if (!ev->block)
		mod_delayed_work(system_freezable_power_efficient_wq,
				&ev->dwork, 0);
	spin_unlock_irq(&ev->lock);
}

/*
 * Tell userland about new events.  Only the events listed in @disk->events are
 * reported, and only if DISK_EVENT_FLAG_UEVENT is set.  Otherwise, events are
 * processed internally but never get reported to userland.
 */
static void disk_event_uevent(struct gendisk *disk, unsigned int events)
{
	char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
	int nr_events = 0, i;

	for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
		if (events & disk->events & (1 << i))
			envp[nr_events++] = disk_uevents[i];

	if (nr_events)
		kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
}
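
/*
 * Illustrative example of how userland can consume the uevent generated
 * above: a udev rule matching the DISK_MEDIA_CHANGE environment variable.
 * The rule and the helper path are hypothetical, not shipped with the
 * kernel.
 *
 *	ACTION=="change", ENV{DISK_MEDIA_CHANGE}=="1", \
 *		RUN+="/usr/local/bin/on-media-change %k"
 */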

static void disk_check_events(struct disk_events *ev,
			      unsigned int *clearing_ptr)
{
	struct gendisk *disk = ev->disk;
	unsigned int clearing = *clearing_ptr;
	unsigned int events;
	unsigned long intv;

	/* check events */
	events = disk->fops->check_events(disk, clearing);

	/* accumulate pending events and schedule next poll if necessary */
	spin_lock_irq(&ev->lock);

	events &= ~ev->pending;
	ev->pending |= events;
	*clearing_ptr &= ~clearing;

	intv = disk_events_poll_jiffies(disk);
	if (!ev->block && intv)
		queue_delayed_work(system_freezable_power_efficient_wq,
				&ev->dwork, intv);

	spin_unlock_irq(&ev->lock);

	if (events & DISK_EVENT_MEDIA_CHANGE)
		inc_diskseq(disk);

	if (disk->event_flags & DISK_EVENT_FLAG_UEVENT)
		disk_event_uevent(disk, events);
}

/**
 * disk_clear_events - synchronously check, clear and return pending events
 * @disk: disk to fetch and clear events from
 * @mask: mask of events to be fetched and cleared
 *
 * Disk events are synchronously checked and pending events in @mask
 * are cleared and returned.  This ignores the block count.
 *
 * CONTEXT:
 * Might sleep.
 */
static unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
{
	struct disk_events *ev = disk->ev;
	unsigned int pending;
	unsigned int clearing = mask;

	if (!ev)
		return 0;

	disk_block_events(disk);

	/*
	 * store the union of mask and ev->clearing on the stack so that the
	 * race with disk_flush_events does not cause ambiguity (ev->clearing
	 * can still be modified even if events are blocked).
	 */
	spin_lock_irq(&ev->lock);
	clearing |= ev->clearing;
	ev->clearing = 0;
	spin_unlock_irq(&ev->lock);

	disk_check_events(ev, &clearing);
	/*
	 * If ev->clearing is not 0, disk_flush_events() was called in the
	 * middle of this function, so run the workfn without delay.
	 */
	__disk_unblock_events(disk, ev->clearing ? true : false);

	/* then, fetch and clear pending events */
	spin_lock_irq(&ev->lock);
	pending = ev->pending & mask;
	ev->pending &= ~mask;
	spin_unlock_irq(&ev->lock);
	WARN_ON_ONCE(clearing & mask);

	return pending;
}

/**
 * disk_check_media_change - check if a removable media has been changed
 * @disk: gendisk to check
 *
 * Returns %true and marks the disk for a partition rescan if a removable
 * media has been changed, and %false if the media did not change.
 */
bool disk_check_media_change(struct gendisk *disk)
{
	unsigned int events;

	events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE |
				   DISK_EVENT_EJECT_REQUEST);
	if (events & DISK_EVENT_MEDIA_CHANGE) {
		set_bit(GD_NEED_PART_SCAN, &disk->state);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(disk_check_media_change);

/**
 * disk_force_media_change - force a media change event
 * @disk: the disk which will raise the event
 *
 * Should be called when the media changes for @disk.  Generates a uevent
 * and attempts to free all dentries and inodes and invalidates all block
 * device page cache entries in that case.
 *
 * Callers that need a partition re-scan should arrange for one explicitly.
 */
void disk_force_media_change(struct gendisk *disk)
{
	disk_event_uevent(disk, DISK_EVENT_MEDIA_CHANGE);
	inc_diskseq(disk);
	bdev_mark_dead(disk->part0, true);
}
EXPORT_SYMBOL_GPL(disk_force_media_change);
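
/*
 * Usage sketch (illustrative, not from this file): a removable-media
 * driver would typically call disk_check_media_change() from its open
 * path and revalidate the medium when it returns true.  The helper name
 * below is hypothetical.
 *
 *	if (disk_check_media_change(disk))
 *		my_revalidate_media(disk);
 */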

/*
 * Separate this part out so that disk_clear_events() can pass in a
 * different pointer for clearing_ptr.
 */
static void disk_events_workfn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct disk_events *ev = container_of(dwork, struct disk_events, dwork);

	disk_check_events(ev, &ev->clearing);
}

/*
 * A disk events enabled device has the following sysfs nodes under
 * its /sys/block/X/ directory.
 *
 * events		: list of all supported events
 * events_async		: list of events which can be detected w/o polling
 *			  (always empty, only for backwards compatibility)
 * events_poll_msecs	: polling interval, 0: disable, -1: system default
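 *
 * For example, a removable device that supports both events might show
 * the following (device name and values are illustrative only):
 *
 *	$ cat /sys/block/sr0/events
 *	media_change eject_request
 *	$ cat /sys/block/sr0/events_poll_msecs
 *	-1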
 */
static ssize_t __disk_events_show(unsigned int events, char *buf)
{
	const char *delim = "";
	ssize_t pos = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++)
		if (events & (1 << i)) {
			pos += sprintf(buf + pos, "%s%s",
				       delim, disk_events_strs[i]);
			delim = " ";
		}
	if (pos)
		pos += sprintf(buf + pos, "\n");
	return pos;
}

static ssize_t disk_events_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (!(disk->event_flags & DISK_EVENT_FLAG_UEVENT))
		return 0;
	return __disk_events_show(disk->events, buf);
}

static ssize_t disk_events_async_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	return 0;
}

static ssize_t disk_events_poll_msecs_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (!disk->ev)
		return sprintf(buf, "-1\n");
	return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
}

static ssize_t disk_events_poll_msecs_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	long intv;

	if (!count || !sscanf(buf, "%ld", &intv))
		return -EINVAL;

	if (intv < 0 && intv != -1)
		return -EINVAL;

	if (!disk->ev)
		return -ENODEV;

	disk_block_events(disk);
	disk->ev->poll_msecs = intv;
	__disk_unblock_events(disk, true);
	return count;
}

DEVICE_ATTR(events, 0444, disk_events_show, NULL);
DEVICE_ATTR(events_async, 0444, disk_events_async_show, NULL);
DEVICE_ATTR(events_poll_msecs, 0644, disk_events_poll_msecs_show,
	    disk_events_poll_msecs_store);

/*
 * The default polling interval can be specified by the kernel
 * parameter block.events_dfl_poll_msecs which defaults to 0
 * (disable).  This can also be modified at runtime by writing to
 * /sys/module/block/parameters/events_dfl_poll_msecs.
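 *
 * Example (values illustrative): enable two-second polling by default,
 * either on the kernel command line or at runtime:
 *
 *	block.events_dfl_poll_msecs=2000
 *	echo 2000 > /sys/module/block/parameters/events_dfl_poll_msecs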
 */
static int disk_events_set_dfl_poll_msecs(const char *val,
					  const struct kernel_param *kp)
{
	struct disk_events *ev;
	int ret;

	ret = param_set_ulong(val, kp);
	if (ret < 0)
		return ret;

	mutex_lock(&disk_events_mutex);
	list_for_each_entry(ev, &disk_events, node)
		disk_flush_events(ev->disk, 0);
	mutex_unlock(&disk_events_mutex);
	return 0;
}

static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = {
	.set	= disk_events_set_dfl_poll_msecs,
	.get	= param_get_ulong,
};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX	"block."

module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
		&disk_events_dfl_poll_msecs, 0644);

/*
 * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
 */
int disk_alloc_events(struct gendisk *disk)
{
	struct disk_events *ev;

	if (!disk->fops->check_events || !disk->events)
		return 0;

	ev = kzalloc_obj(*ev);
	if (!ev) {
		pr_warn("%s: failed to initialize events\n", disk->disk_name);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&ev->node);
	ev->disk = disk;
	spin_lock_init(&ev->lock);
	mutex_init(&ev->block_mutex);
	ev->block = 1;
	ev->poll_msecs = -1;
	INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);

	disk->ev = ev;
	return 0;
}

void disk_add_events(struct gendisk *disk)
{
	if (!disk->ev)
		return;

	mutex_lock(&disk_events_mutex);
	list_add_tail(&disk->ev->node, &disk_events);
	mutex_unlock(&disk_events_mutex);

	/*
	 * Block count is initialized to 1 and the following initial
	 * unblock kicks it into action.
	 */
	__disk_unblock_events(disk, true);
}

void disk_del_events(struct gendisk *disk)
{
	if (disk->ev) {
		disk_block_events(disk);

		mutex_lock(&disk_events_mutex);
		list_del_init(&disk->ev->node);
		mutex_unlock(&disk_events_mutex);
	}
}

void disk_release_events(struct gendisk *disk)
{
	/* the block count should be 1 from disk_del_events() */
	WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
	kfree(disk->ev);
}