// SPDX-License-Identifier: GPL-2.0-only
/*
   drbd_worker.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.


*/

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>

#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"

static int make_ov_request(struct drbd_peer_device *, int);
static int make_resync_request(struct drbd_peer_device *, int);

/* endio handlers:
 *   drbd_md_endio (defined here)
 *   drbd_request_endio (defined here)
 *   drbd_peer_request_endio (defined here)
 *   drbd_bm_endio (defined in drbd_bitmap.c)
 *
 * For all these callbacks, note the following:
 * The callbacks will be called in irq context by the IDE drivers,
 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
 * Try to get the locking right :)
 *
 */

/* used for synchronous meta data and bitmap IO
 * submitted by drbd_md_sync_page_io()
 */
void drbd_md_endio(struct bio *bio)
{
	struct drbd_device *device;

	device = bio->bi_private;
	device->md_io.error = blk_status_to_errno(bio->bi_status);

	/* special case: drbd_md_read() during drbd_adm_attach() */
	if (device->ldev)
		put_ldev(device);
	bio_put(bio);

	/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
	 * to timeout on the lower level device, and eventually detach from it.
	 * If this io completion runs after that timeout expired, this
	 * drbd_md_put_buffer() may allow us to finally try and re-attach.
	 * During normal operation, this only puts that extra reference
	 * down to 1 again.
	 * Make sure we first drop the reference, and only then signal
	 * completion, or we may (in drbd_al_read_log()) cycle so fast into the
	 * next drbd_md_sync_page_io(), that we trigger the
	 * ASSERT(atomic_read(&device->md_io_in_use) == 1) there.
	 */
	drbd_md_put_buffer(device);
	device->md_io.done = 1;
	wake_up(&device->misc_wait);
}

/* reads on behalf of the partner,
 * "submitted" by the receiver
 */
static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_peer_device *peer_device = peer_req->peer_device;
	struct drbd_device *device = peer_device->device;

	spin_lock_irqsave(&device->resource->req_lock, flags);
	device->read_cnt += peer_req->i.size >> 9;
	list_del(&peer_req->w.list);
	if (list_empty(&device->read_ee))
		wake_up(&device->ee_wait);
	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
		__drbd_chk_io_error(device, DRBD_READ_ERROR);
	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	drbd_queue_work(&peer_device->connection->sender_work, &peer_req->w);
	put_ldev(device);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver, final stage. */
void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_peer_device *peer_device = peer_req->peer_device;
	struct drbd_device *device = peer_device->device;
	struct drbd_connection *connection = peer_device->connection;
	struct drbd_interval i;
	int do_wake;
	u64 block_id;
	int do_al_complete_io;

	/* after we moved peer_req to done_ee,
	 * we may no longer access it,
	 * it may be freed/reused already!
	 * (as soon as we release the req_lock) */
	i = peer_req->i;
	do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
	block_id = peer_req->block_id;
	peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;

	if (peer_req->flags & EE_WAS_ERROR) {
		/* In protocol != C, we usually do not send write acks.
		 * In case of a write error, send the neg ack anyways. */
		if (!__test_and_set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags))
			inc_unacked(device);
		drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size);
	}

	spin_lock_irqsave(&device->resource->req_lock, flags);
	device->writ_cnt += peer_req->i.size >> 9;
	list_move_tail(&peer_req->w.list, &device->done_ee);

	/*
	 * Do not remove from the write_requests tree here: we did not send the
	 * Ack yet and did not wake possibly waiting conflicting requests.
	 * Removed from the tree from "drbd_process_done_ee" within the
	 * appropriate dw.cb (e_end_block/e_end_resync_block) or from
	 * _drbd_clear_done_ee.
	 */

	do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee);

	/* FIXME do we want to detach for failed REQ_OP_DISCARD?
	 * ((peer_req->flags & (EE_WAS_ERROR|EE_TRIM)) == EE_WAS_ERROR) */
	if (peer_req->flags & EE_WAS_ERROR)
		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);

	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		kref_get(&device->kref); /* put is in drbd_send_acks_wf() */
		if (!queue_work(connection->ack_sender, &peer_device->send_acks_work))
			kref_put(&device->kref, drbd_destroy_device);
	}
	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	if (block_id == ID_SYNCER)
		drbd_rs_complete_io(device, i.sector);

	if (do_wake)
		wake_up(&device->ee_wait);

	if (do_al_complete_io)
		drbd_al_complete_io(device, &i);

	put_ldev(device);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver.
 */
void drbd_peer_request_endio(struct bio *bio)
{
	struct drbd_peer_request *peer_req = bio->bi_private;
	struct drbd_device *device = peer_req->peer_device->device;
	bool is_write = bio_data_dir(bio) == WRITE;
	bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES ||
			  bio_op(bio) == REQ_OP_DISCARD;

	if (bio->bi_status && drbd_ratelimit())
		drbd_warn(device, "%s: error=%d s=%llus\n",
				is_write ? (is_discard ? "discard" : "write")
					: "read", bio->bi_status,
				(unsigned long long)peer_req->i.sector);

	if (bio->bi_status)
		set_bit(__EE_WAS_ERROR, &peer_req->flags);

	bio_put(bio); /* no need for the bio anymore */
	if (atomic_dec_and_test(&peer_req->pending_bios)) {
		if (is_write)
			drbd_endio_write_sec_final(peer_req);
		else
			drbd_endio_read_sec_final(peer_req);
	}
}

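/* Helper for drbd_request_endio() below: a delayed *successful* completion
 * of a locally aborted request may already have scribbled over pages that
 * were handed back to upper layers, so the only safe reaction is to panic.
 * See the detailed comment in drbd_request_endio(). */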
static void
drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device)
{
	panic("drbd%u %s/%u potential random memory corruption caused by delayed completion of aborted local request\n",
		device->minor, device->resource->name, device->vnr);
}

/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
 */
void drbd_request_endio(struct bio *bio)
{
	unsigned long flags;
	struct drbd_request *req = bio->bi_private;
	struct drbd_device *device = req->device;
	struct bio_and_error m;
	enum drbd_req_event what;

	/* If this request was aborted locally before,
	 * but now was completed "successfully",
	 * chances are that this caused arbitrary data corruption.
	 *
	 * "aborting" requests, or force-detaching the disk, is intended for
	 * completely blocked/hung local backing devices which no longer
	 * complete requests at all, not even error completions. In this
	 * situation, usually a hard-reset and failover is the only way out.
	 *
	 * By "aborting", basically faking a local error-completion,
	 * we allow for a more graceful switchover by cleanly migrating services.
	 * Still the affected node has to be rebooted "soon".
	 *
	 * By completing these requests, we allow the upper layers to re-use
	 * the associated data pages.
	 *
	 * If later the local backing device "recovers", and now DMAs some data
	 * from disk into the original request pages, in the best case it will
	 * just put random data into unused pages; but typically it will corrupt
	 * meanwhile completely unrelated data, causing all sorts of damage.
	 *
	 * Which means delayed successful completion,
	 * especially for READ requests,
	 * is a reason to panic().
	 *
	 * We assume that a delayed *error* completion is OK,
	 * though we still will complain noisily about it.
	 */
	if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) {
		if (drbd_ratelimit())
			drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");

		if (!bio->bi_status)
			drbd_panic_after_delayed_completion_of_aborted_request(device);
	}

	/* to avoid recursion in __req_mod */
	if (unlikely(bio->bi_status)) {
		switch (bio_op(bio)) {
		case REQ_OP_WRITE_ZEROES:
		case REQ_OP_DISCARD:
			if (bio->bi_status == BLK_STS_NOTSUPP)
				what = DISCARD_COMPLETED_NOTSUPP;
			else
				what = DISCARD_COMPLETED_WITH_ERROR;
			break;
		case REQ_OP_READ:
			if (bio->bi_opf & REQ_RAHEAD)
				what = READ_AHEAD_COMPLETED_WITH_ERROR;
			else
				what = READ_COMPLETED_WITH_ERROR;
			break;
		default:
			what = WRITE_COMPLETED_WITH_ERROR;
			break;
		}
	} else {
		what = COMPLETED_OK;
	}

	req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
	bio_put(bio);

	/* not req_mod(), we need irqsave here! */
	spin_lock_irqsave(&device->resource->req_lock, flags);
	__req_mod(req, what, NULL, &m);
	spin_unlock_irqrestore(&device->resource->req_lock, flags);
	put_ldev(device);

	if (m.bio)
		complete_master_bio(device, &m);
}

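/* Compute a digest over all pages of a peer request, used for checksum
 * based resync and online verify.  Every page in the page chain is hashed
 * in full except for the last one, which may be only partially used, since
 * peer_req->i.size need not be a multiple of PAGE_SIZE. */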
void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req, void *digest)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	struct page *page = peer_req->pages;
	struct page *tmp;
	unsigned len;
	void *src;

	desc->tfm = tfm;

	crypto_shash_init(desc);

	src = kmap_atomic(page);
	while ((tmp = page_chain_next(page))) {
		/* all but the last page will be fully used */
		crypto_shash_update(desc, src, PAGE_SIZE);
		kunmap_atomic(src);
		page = tmp;
		src = kmap_atomic(page);
	}
	/* and now the last, possibly only partially used page */
	len = peer_req->i.size & (PAGE_SIZE - 1);
	crypto_shash_update(desc, src, len ?: PAGE_SIZE);
	kunmap_atomic(src);

	crypto_shash_final(desc, digest);
	shash_desc_zero(desc);
}

void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	struct bio_vec bvec;
	struct bvec_iter iter;

	desc->tfm = tfm;

	crypto_shash_init(desc);

	bio_for_each_segment(bvec, bio, iter) {
		u8 *src;

		src = bvec_kmap_local(&bvec);
		crypto_shash_update(desc, src, bvec.bv_len);
		kunmap_local(src);
	}
	crypto_shash_final(desc, digest);
	shash_desc_zero(desc);
}

/* MAYBE merge common code with w_e_end_ov_req */
static int w_e_send_csum(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct drbd_peer_device *peer_device = peer_req->peer_device;
	struct drbd_device *device = peer_device->device;
	int digest_size;
	void *digest;
	int err = 0;

	if (unlikely(cancel))
		goto out;

	if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
		goto out;

	digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (digest) {
		sector_t sector = peer_req->i.sector;
		unsigned int size = peer_req->i.size;
		drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
		/* Free peer_req and pages before send.
		 * In case we block on congestion, we could otherwise run into
		 * some distributed deadlock, if the other side blocks on
		 * congestion as well, because our receiver blocks in
		 * drbd_alloc_pages due to pp_in_use > max_buffers. */
		drbd_free_peer_req(device, peer_req);
		peer_req = NULL;
		inc_rs_pending(peer_device);
		err = drbd_send_drequest_csum(peer_device, sector, size,
					      digest, digest_size,
					      P_CSUM_RS_REQUEST);
		kfree(digest);
	} else {
		drbd_err(device, "kmalloc() of digest failed.\n");
		err = -ENOMEM;
	}

out:
	if (peer_req)
		drbd_free_peer_req(device, peer_req);

	if (unlikely(err))
		drbd_err(device, "drbd_send_drequest(..., csum) failed\n");
	return err;
}

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_peer_request *peer_req;

	if (!get_ldev(device))
		return -EIO;

	/* GFP_TRY, because if there is no memory available right now, this may
	 * be rescheduled for later. It is "only" background resync, after all. */
	peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector,
				       size, size, GFP_TRY);
	if (!peer_req)
		goto defer;

	peer_req->w.cb = w_e_send_csum;
	peer_req->opf = REQ_OP_READ;
	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&peer_req->w.list, &device->read_ee);
	spin_unlock_irq(&device->resource->req_lock);

	atomic_add(size >> 9, &device->rs_sect_ev);
	if (drbd_submit_peer_request(peer_req) == 0)
		return 0;

	/* If it failed because of ENOMEM, retry should help.  If it failed
	 * because bio_add_page failed (probably broken lower level driver),
	 * retry may or may not help.
	 * If it does not, you may need to force disconnect. */
	spin_lock_irq(&device->resource->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&device->resource->req_lock);

	drbd_free_peer_req(device, peer_req);
defer:
	put_ldev(device);
	return -EAGAIN;
}

int w_resync_timer(struct drbd_work *w, int cancel)
{
	struct drbd_device *device =
		container_of(w, struct drbd_device, resync_work);

	switch (device->state.conn) {
	case C_VERIFY_S:
		make_ov_request(first_peer_device(device), cancel);
		break;
	case C_SYNC_TARGET:
		make_resync_request(first_peer_device(device), cancel);
		break;
	}

	return 0;
}

void resync_timer_fn(struct timer_list *t)
{
	struct drbd_device *device = timer_container_of(device, t,
							resync_timer);

	drbd_queue_work_if_unqueued(
		&first_peer_device(device)->connection->sender_work,
		&device->resync_work);
}

static void fifo_set(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] = value;
}

static int fifo_push(struct fifo_buffer *fb, int value)
{
	int ov;

	ov = fb->values[fb->head_index];
	fb->values[fb->head_index++] = value;

	if (fb->head_index >= fb->size)
		fb->head_index = 0;

	return ov;
}

static void fifo_add_val(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] += value;
}

struct fifo_buffer *fifo_alloc(unsigned int fifo_size)
{
	struct fifo_buffer *fb;

	fb = kzalloc(struct_size(fb, values, fifo_size), GFP_NOIO);
	if (!fb)
		return NULL;

	fb->head_index = 0;
	fb->size = fifo_size;
	fb->total = 0;

	return fb;
}

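/* Dynamic resync speed controller, invoked roughly every SLEEP_TIME (100ms).
 * From the number of sectors that came back since the last invocation
 * (sect_in) and the number still in flight, it computes how many sectors to
 * request next, aiming to keep either c_fill_target sectors in flight or
 * the reply delay near c_delay_target, never exceeding c_max_rate.
 * Corrections are spread over the plan-ahead fifo. */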
static int drbd_rs_controller(struct drbd_peer_device *peer_device, unsigned int sect_in)
{
	struct drbd_device *device = peer_device->device;
	struct disk_conf *dc;
	unsigned int want;     /* The number of sectors we want in-flight */
	int req_sect; /* Number of sectors to request in this turn */
	int correction; /* Number of sectors more we need in-flight */
	int cps; /* correction per invocation of drbd_rs_controller() */
	int steps; /* Number of time steps to plan ahead */
	int curr_corr;
	int max_sect;
	struct fifo_buffer *plan;

	dc = rcu_dereference(device->ldev->disk_conf);
	plan = rcu_dereference(device->rs_plan_s);

	steps = plan->size; /* (dc->c_plan_ahead * 10 * SLEEP_TIME) / HZ; */

	if (device->rs_in_flight + sect_in == 0) { /* At start of resync */
		want = ((dc->resync_rate * 2 * SLEEP_TIME) / HZ) * steps;
	} else { /* normal path */
		want = dc->c_fill_target ? dc->c_fill_target :
			sect_in * dc->c_delay_target * HZ / (SLEEP_TIME * 10);
	}

	correction = want - device->rs_in_flight - plan->total;

	/* Plan ahead */
	cps = correction / steps;
	fifo_add_val(plan, cps);
	plan->total += cps * steps;

	/* What we do in this step */
	curr_corr = fifo_push(plan, 0);
	plan->total -= curr_corr;

	req_sect = sect_in + curr_corr;
	if (req_sect < 0)
		req_sect = 0;

	max_sect = (dc->c_max_rate * 2 * SLEEP_TIME) / HZ;
	if (req_sect > max_sect)
		req_sect = max_sect;

	/*
	drbd_warn(device, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
		 sect_in, device->rs_in_flight, want, correction,
		 steps, cps, device->rs_planed, curr_corr, req_sect);
	*/

	return req_sect;
}

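/* How many 4k (BM_BLOCK_SIZE) resync requests to issue in this 100ms step:
 * either the output of the controller above (if a plan is configured) or
 * the static resync_rate, clamped so that at most max-buffers/2 requests
 * are in flight at any time (see the comment below). */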
static int drbd_rs_number_requests(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	unsigned int sect_in;  /* Number of sectors that came in since the last turn */
	int number, mxb;

	sect_in = atomic_xchg(&device->rs_sect_in, 0);
	device->rs_in_flight -= sect_in;

	rcu_read_lock();
	mxb = drbd_get_max_buffers(device) / 2;
	if (rcu_dereference(device->rs_plan_s)->size) {
		number = drbd_rs_controller(peer_device, sect_in) >> (BM_BLOCK_SHIFT - 9);
		device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
	} else {
		device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate;
		number = SLEEP_TIME * device->c_sync_rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
	}
	rcu_read_unlock();

	/* Don't have more than "max-buffers"/2 in-flight.
	 * Otherwise we may cause the remote site to stall on drbd_alloc_pages(),
	 * potentially causing a distributed deadlock on congestion during
	 * online-verify or (checksum-based) resync, if max-buffers,
	 * socket buffer sizes and resync rate settings are mis-configured. */

	/* note that "number" is in units of "BM_BLOCK_SIZE" (which is 4k),
	 * mxb (as used here, and in drbd_alloc_pages on the peer) is
	 * "number of pages" (typically also 4k),
	 * but "rs_in_flight" is in "sectors" (512 Byte). */
	if (mxb - device->rs_in_flight/8 < number)
		number = mxb - device->rs_in_flight/8;

	return number;
}

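/* Scan the bitmap for out-of-sync blocks and send up to "number" resync
 * requests (P_RS_DATA_REQUEST, P_RS_THIN_REQ, or local reads feeding
 * P_CSUM_RS_REQUEST when checksum based resync is in use) to the peer.
 * Adjacent dirty bits are merged into larger, aligned requests up to
 * max_bio_size.  Re-arms the resync timer for the next step unless the
 * end of the bitmap was reached. */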
static int make_resync_request(struct drbd_peer_device *const peer_device, int cancel)
{
	struct drbd_device *const device = peer_device->device;
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	unsigned long bit;
	sector_t sector;
	const sector_t capacity = get_capacity(device->vdisk);
	int max_bio_size;
	int number, rollback_i, size;
	int align, requeue = 0;
	int i = 0;
	int discard_granularity = 0;

	if (unlikely(cancel))
		return 0;

	if (device->rs_total == 0) {
		/* empty resync? */
		drbd_resync_finished(peer_device);
		return 0;
	}

	if (!get_ldev(device)) {
		/* Since we only need to access device->rsync,
		   get_ldev_if_state(device, D_FAILED) would be sufficient, but
		   continuing resync with a broken disk makes no sense at
		   all */
		drbd_err(device, "Disk broke down during resync!\n");
		return 0;
	}

	if (connection->agreed_features & DRBD_FF_THIN_RESYNC) {
		rcu_read_lock();
		discard_granularity = rcu_dereference(device->ldev->disk_conf)->rs_discard_granularity;
		rcu_read_unlock();
	}

	max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9;
	number = drbd_rs_number_requests(peer_device);
	if (number <= 0)
		goto requeue;

	for (i = 0; i < number; i++) {
		/* Stop generating RS requests when half of the send buffer is filled,
		 * but notify TCP that we'd like to have more space. */
		mutex_lock(&connection->data.mutex);
		if (connection->data.socket) {
			struct sock *sk = connection->data.socket->sk;
			int queued = sk->sk_wmem_queued;
			int sndbuf = sk->sk_sndbuf;
			if (queued > sndbuf / 2) {
				requeue = 1;
				if (sk->sk_socket)
					set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			}
		} else
			requeue = 1;
		mutex_unlock(&connection->data.mutex);
		if (requeue)
			goto requeue;

next_sector:
		size = BM_BLOCK_SIZE;
		bit  = drbd_bm_find_next(device, device->bm_resync_fo);

		if (bit == DRBD_END_OF_BITMAP) {
			device->bm_resync_fo = drbd_bm_bits(device);
			put_ldev(device);
			return 0;
		}

		sector = BM_BIT_TO_SECT(bit);

		if (drbd_try_rs_begin_io(peer_device, sector)) {
			device->bm_resync_fo = bit;
			goto requeue;
		}
		device->bm_resync_fo = bit + 1;

		if (unlikely(drbd_bm_test_bit(device, bit) == 0)) {
			drbd_rs_complete_io(device, sector);
			goto next_sector;
		}

#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
		/* try to find some adjacent bits.
		 * we stop if we have already the maximum req size.
		 *
		 * Additionally always align bigger requests, in order to
		 * be prepared for all stripe sizes of software RAIDs.
		 */
		align = 1;
		rollback_i = i;
		while (i < number) {
			if (size + BM_BLOCK_SIZE > max_bio_size)
				break;

			/* Be always aligned */
			if (sector & ((1<<(align+3))-1))
				break;

			if (discard_granularity && size == discard_granularity)
				break;

			/* do not cross extent boundaries */
			if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
				break;
			/* now, is it actually dirty, after all?
			 * caution, drbd_bm_test_bit is tri-state for some
			 * obscure reason; ( b == 0 ) would get the out-of-band
			 * only accidentally right because of the "oddly sized"
			 * adjustment below */
			if (drbd_bm_test_bit(device, bit+1) != 1)
				break;
			bit++;
			size += BM_BLOCK_SIZE;
			if ((BM_BLOCK_SIZE << align) <= size)
				align++;
			i++;
		}
		/* if we merged some,
		 * reset the offset to start the next drbd_bm_find_next from */
		if (size > BM_BLOCK_SIZE)
			device->bm_resync_fo = bit + 1;
#endif

		/* adjust very last sectors, in case we are oddly sized */
		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;

		if (device->use_csums) {
			switch (read_for_csum(peer_device, sector, size)) {
			case -EIO: /* Disk failure */
				put_ldev(device);
				return -EIO;
			case -EAGAIN: /* allocation failed, or ldev busy */
				drbd_rs_complete_io(device, sector);
				device->bm_resync_fo = BM_SECT_TO_BIT(sector);
				i = rollback_i;
				goto requeue;
			case 0:
				/* everything ok */
				break;
			default:
				BUG();
			}
		} else {
			int err;

			inc_rs_pending(peer_device);
			err = drbd_send_drequest(peer_device,
						 size == discard_granularity ? P_RS_THIN_REQ : P_RS_DATA_REQUEST,
						 sector, size, ID_SYNCER);
			if (err) {
				drbd_err(device, "drbd_send_drequest() failed, aborting...\n");
				dec_rs_pending(peer_device);
				put_ldev(device);
				return err;
			}
		}
	}

	if (device->bm_resync_fo >= drbd_bm_bits(device)) {
		/* last syncer _request_ was sent,
		 * but the P_RS_DATA_REPLY not yet received.  sync will end (and
		 * next sync group will resume), as soon as we receive the last
		 * resync data block, and the last bit is cleared.
		 * until then resync "work" is "inactive" ...
		 */
		put_ldev(device);
		return 0;
	}

 requeue:
	device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
	put_ldev(device);
	return 0;
}

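/* Online verify counterpart of make_resync_request(): walk the device from
 * ov_position onwards and send up to "number" P_OV_REQUEST packets, one per
 * 4k block, honouring an optional stop sector. */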
static int make_ov_request(struct drbd_peer_device *peer_device, int cancel)
{
	struct drbd_device *device = peer_device->device;
	int number, i, size;
	sector_t sector;
	const sector_t capacity = get_capacity(device->vdisk);
	bool stop_sector_reached = false;

	if (unlikely(cancel))
		return 1;

	number = drbd_rs_number_requests(peer_device);

	sector = device->ov_position;
	for (i = 0; i < number; i++) {
		if (sector >= capacity)
			return 1;

		/* We check for "finished" only in the reply path:
		 * w_e_end_ov_reply().
		 * We need to send at least one request out. */
		stop_sector_reached = i > 0
			&& verify_can_do_stop_sector(device)
			&& sector >= device->ov_stop_sector;
		if (stop_sector_reached)
			break;

		size = BM_BLOCK_SIZE;

		if (drbd_try_rs_begin_io(peer_device, sector)) {
			device->ov_position = sector;
			goto requeue;
		}

		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;

		inc_rs_pending(peer_device);
		if (drbd_send_ov_request(first_peer_device(device), sector, size)) {
			dec_rs_pending(peer_device);
			return 0;
		}
		sector += BM_SECT_PER_BIT;
	}
	device->ov_position = sector;

 requeue:
	device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	if (i == 0 || !stop_sector_reached)
		mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
	return 1;
}

int w_ov_finished(struct drbd_work *w, int cancel)
{
	struct drbd_device_work *dw =
		container_of(w, struct drbd_device_work, w);
	struct drbd_device *device = dw->device;
	kfree(dw);
	ov_out_of_sync_print(first_peer_device(device));
	drbd_resync_finished(first_peer_device(device));

	return 0;
}

static int w_resync_finished(struct drbd_work *w, int cancel)
{
	struct drbd_device_work *dw =
		container_of(w, struct drbd_device_work, w);
	struct drbd_device *device = dw->device;
	kfree(dw);

	drbd_resync_finished(first_peer_device(device));

	return 0;
}

static void ping_peer(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;

	clear_bit(GOT_PING_ACK, &connection->flags);
	request_ping(connection);
	wait_event(connection->ping_wait,
		   test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED);
}

int drbd_resync_finished(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_connection *connection = peer_device->connection;
	unsigned long db, dt, dbdt;
	unsigned long n_oos;
	union drbd_state os, ns;
	struct drbd_device_work *dw;
	char *khelper_cmd = NULL;
	int verify_done = 0;

	/* Remove all elements from the resync LRU. Since future actions
	 * might set bits in the (main) bitmap, the entries in the
	 * resync LRU would be wrong. */
	if (drbd_rs_del_all(device)) {
		/* In case this is not possible now, most probably because
		 * there are P_RS_DATA_REPLY Packets lingering on the worker's
		 * queue (or even the read operations for those packets
		 * are not finished by now). Retry in 100ms. */
872b411b363SPhilipp Reisner
87320ee6390SPhilipp Reisner schedule_timeout_interruptible(HZ / 10);
87484b8c06bSAndreas Gruenbacher dw = kmalloc(sizeof(struct drbd_device_work), GFP_ATOMIC);
87584b8c06bSAndreas Gruenbacher if (dw) {
87684b8c06bSAndreas Gruenbacher dw->w.cb = w_resync_finished;
87784b8c06bSAndreas Gruenbacher dw->device = device;
87826a96110SLars Ellenberg drbd_queue_work(&connection->sender_work, &dw->w);
879b411b363SPhilipp Reisner return 1;
880b411b363SPhilipp Reisner }
88184b8c06bSAndreas Gruenbacher drbd_err(device, "Warn failed to drbd_rs_del_all() and to kmalloc(dw).\n");
882b411b363SPhilipp Reisner }
883b411b363SPhilipp Reisner
884b30ab791SAndreas Gruenbacher dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
885b411b363SPhilipp Reisner if (dt <= 0)
886b411b363SPhilipp Reisner dt = 1;
88758ffa580SLars Ellenberg
888b30ab791SAndreas Gruenbacher db = device->rs_total;
88958ffa580SLars Ellenberg /* adjust for verify start and stop sectors, respective reached position */
890b30ab791SAndreas Gruenbacher if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
891b30ab791SAndreas Gruenbacher db -= device->ov_left;
89258ffa580SLars Ellenberg
893b411b363SPhilipp Reisner dbdt = Bit2KB(db/dt);
894b30ab791SAndreas Gruenbacher device->rs_paused /= HZ;
895b411b363SPhilipp Reisner
896b30ab791SAndreas Gruenbacher if (!get_ldev(device))
897b411b363SPhilipp Reisner goto out;
898b411b363SPhilipp Reisner
899b30ab791SAndreas Gruenbacher ping_peer(device);
900af85e8e8SLars Ellenberg
9010500813fSAndreas Gruenbacher spin_lock_irq(&device->resource->req_lock);
902b30ab791SAndreas Gruenbacher os = drbd_read_state(device);
903b411b363SPhilipp Reisner
90426525618SLars Ellenberg verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
90526525618SLars Ellenberg
906b411b363SPhilipp Reisner /* This protects us against multiple calls (that can happen in the presence
907b411b363SPhilipp Reisner of application IO), and against connectivity loss just before we arrive here. */
908b411b363SPhilipp Reisner if (os.conn <= C_CONNECTED)
909b411b363SPhilipp Reisner goto out_unlock;
910b411b363SPhilipp Reisner
911b411b363SPhilipp Reisner ns = os;
912b411b363SPhilipp Reisner ns.conn = C_CONNECTED;
913b411b363SPhilipp Reisner
914d0180171SAndreas Gruenbacher drbd_info(device, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
91526525618SLars Ellenberg verify_done ? "Online verify" : "Resync",
916b30ab791SAndreas Gruenbacher dt + device->rs_paused, device->rs_paused, dbdt);
917b411b363SPhilipp Reisner
918b30ab791SAndreas Gruenbacher n_oos = drbd_bm_total_weight(device);
919b411b363SPhilipp Reisner
920b411b363SPhilipp Reisner if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
921b411b363SPhilipp Reisner if (n_oos) {
922d0180171SAndreas Gruenbacher drbd_alert(device, "Online verify found %lu %dk block out of sync!\n",
923b411b363SPhilipp Reisner n_oos, Bit2KB(1));
924b411b363SPhilipp Reisner khelper_cmd = "out-of-sync";
925b411b363SPhilipp Reisner }
926b411b363SPhilipp Reisner } else {
9270b0ba1efSAndreas Gruenbacher D_ASSERT(device, (n_oos - device->rs_failed) == 0);
928b411b363SPhilipp Reisner
929b411b363SPhilipp Reisner if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
930b411b363SPhilipp Reisner khelper_cmd = "after-resync-target";
931b411b363SPhilipp Reisner
932aaaba345SLars Ellenberg if (device->use_csums && device->rs_total) {
933b30ab791SAndreas Gruenbacher const unsigned long s = device->rs_same_csum;
934b30ab791SAndreas Gruenbacher const unsigned long t = device->rs_total;
935b411b363SPhilipp Reisner const int ratio =
936b411b363SPhilipp Reisner (t == 0) ? 0 :
937b411b363SPhilipp Reisner (t < 100000) ? ((s*100)/t) : (s/(t/100));
938d0180171SAndreas Gruenbacher drbd_info(device, "%u %% had equal checksums, eliminated: %luK; "
939b411b363SPhilipp Reisner "transferred %luK total %luK\n",
940b411b363SPhilipp Reisner ratio,
941b30ab791SAndreas Gruenbacher Bit2KB(device->rs_same_csum),
942b30ab791SAndreas Gruenbacher Bit2KB(device->rs_total - device->rs_same_csum),
943b30ab791SAndreas Gruenbacher Bit2KB(device->rs_total));
944b411b363SPhilipp Reisner }
945b411b363SPhilipp Reisner }
946b411b363SPhilipp Reisner
947b30ab791SAndreas Gruenbacher if (device->rs_failed) {
948d0180171SAndreas Gruenbacher drbd_info(device, " %lu failed blocks\n", device->rs_failed);
949b411b363SPhilipp Reisner
950b411b363SPhilipp Reisner if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
951b411b363SPhilipp Reisner ns.disk = D_INCONSISTENT;
952b411b363SPhilipp Reisner ns.pdsk = D_UP_TO_DATE;
953b411b363SPhilipp Reisner } else {
954b411b363SPhilipp Reisner ns.disk = D_UP_TO_DATE;
955b411b363SPhilipp Reisner ns.pdsk = D_INCONSISTENT;
956b411b363SPhilipp Reisner }
957b411b363SPhilipp Reisner } else {
958b411b363SPhilipp Reisner ns.disk = D_UP_TO_DATE;
959b411b363SPhilipp Reisner ns.pdsk = D_UP_TO_DATE;
960b411b363SPhilipp Reisner
961b411b363SPhilipp Reisner if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
962b30ab791SAndreas Gruenbacher if (device->p_uuid) {
963b411b363SPhilipp Reisner int i;
964b411b363SPhilipp Reisner for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
965b30ab791SAndreas Gruenbacher _drbd_uuid_set(device, i, device->p_uuid[i]);
966b30ab791SAndreas Gruenbacher drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]);
967b30ab791SAndreas Gruenbacher _drbd_uuid_set(device, UI_CURRENT, device->p_uuid[UI_CURRENT]);
968b411b363SPhilipp Reisner } else {
969d0180171SAndreas Gruenbacher drbd_err(device, "device->p_uuid is NULL! BUG\n");
970b411b363SPhilipp Reisner }
971b411b363SPhilipp Reisner }
972b411b363SPhilipp Reisner
97362b0da3aSLars Ellenberg if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
97462b0da3aSLars Ellenberg /* for verify runs, we don't update uuids here,
97562b0da3aSLars Ellenberg * so there would be nothing to report. */
976b30ab791SAndreas Gruenbacher drbd_uuid_set_bm(device, 0UL);
977b30ab791SAndreas Gruenbacher drbd_print_uuids(device, "updated UUIDs");
978b30ab791SAndreas Gruenbacher if (device->p_uuid) {
979b411b363SPhilipp Reisner /* Now the two UUID sets are equal, update what we
980b411b363SPhilipp Reisner * know of the peer. */
981b411b363SPhilipp Reisner int i;
982b411b363SPhilipp Reisner for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
983b30ab791SAndreas Gruenbacher device->p_uuid[i] = device->ldev->md.uuid[i];
984b411b363SPhilipp Reisner }
985b411b363SPhilipp Reisner }
98662b0da3aSLars Ellenberg }
987b411b363SPhilipp Reisner
988b30ab791SAndreas Gruenbacher _drbd_set_state(device, ns, CS_VERBOSE, NULL);
989b411b363SPhilipp Reisner out_unlock:
9900500813fSAndreas Gruenbacher spin_unlock_irq(&device->resource->req_lock);
99126a96110SLars Ellenberg
99226a96110SLars Ellenberg /* If we have been sync source, and have an effective fencing-policy,
99326a96110SLars Ellenberg * once *all* volumes are back in sync, call "unfence". */
99426a96110SLars Ellenberg if (os.conn == C_SYNC_SOURCE) {
99526a96110SLars Ellenberg enum drbd_disk_state disk_state = D_MASK;
99626a96110SLars Ellenberg enum drbd_disk_state pdsk_state = D_MASK;
99726a96110SLars Ellenberg enum drbd_fencing_p fp = FP_DONT_CARE;
99826a96110SLars Ellenberg
99926a96110SLars Ellenberg rcu_read_lock();
100026a96110SLars Ellenberg fp = rcu_dereference(device->ldev->disk_conf)->fencing;
100126a96110SLars Ellenberg if (fp != FP_DONT_CARE) {
100226a96110SLars Ellenberg struct drbd_peer_device *peer_device;
100326a96110SLars Ellenberg int vnr;
100426a96110SLars Ellenberg idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
100526a96110SLars Ellenberg struct drbd_device *device = peer_device->device;
100626a96110SLars Ellenberg disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
100726a96110SLars Ellenberg pdsk_state = min_t(enum drbd_disk_state, pdsk_state, device->state.pdsk);
100826a96110SLars Ellenberg }
100926a96110SLars Ellenberg }
101026a96110SLars Ellenberg rcu_read_unlock();
101126a96110SLars Ellenberg if (disk_state == D_UP_TO_DATE && pdsk_state == D_UP_TO_DATE)
101226a96110SLars Ellenberg conn_khelper(connection, "unfence-peer");
101326a96110SLars Ellenberg }
101426a96110SLars Ellenberg
1015b30ab791SAndreas Gruenbacher put_ldev(device);
1016b411b363SPhilipp Reisner out:
1017b30ab791SAndreas Gruenbacher device->rs_total = 0;
1018b30ab791SAndreas Gruenbacher device->rs_failed = 0;
1019b30ab791SAndreas Gruenbacher device->rs_paused = 0;
102058ffa580SLars Ellenberg
102158ffa580SLars Ellenberg /* reset start sector, if we reached end of device */
1022b30ab791SAndreas Gruenbacher if (verify_done && device->ov_left == 0)
1023b30ab791SAndreas Gruenbacher device->ov_start_sector = 0;
1024b411b363SPhilipp Reisner
1025b30ab791SAndreas Gruenbacher drbd_md_sync(device);
102613d42685SLars Ellenberg
1027b411b363SPhilipp Reisner if (khelper_cmd)
1028b30ab791SAndreas Gruenbacher drbd_khelper(device, khelper_cmd);
1029b411b363SPhilipp Reisner
1030b411b363SPhilipp Reisner return 1;
1031b411b363SPhilipp Reisner }
1032b411b363SPhilipp Reisner
1033b411b363SPhilipp Reisner /* helper */
1034b30ab791SAndreas Gruenbacher static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
1035b411b363SPhilipp Reisner {
1036045417f7SAndreas Gruenbacher if (drbd_peer_req_has_active_page(peer_req)) {
1037b411b363SPhilipp Reisner /* This might happen if sendpage() has not finished */
1038ba6bee98SCai Huoqing int i = PFN_UP(peer_req->i.size);
1039b30ab791SAndreas Gruenbacher atomic_add(i, &device->pp_in_use_by_net);
1040b30ab791SAndreas Gruenbacher atomic_sub(i, &device->pp_in_use);
10410500813fSAndreas Gruenbacher spin_lock_irq(&device->resource->req_lock);
1042a8cd15baSAndreas Gruenbacher list_add_tail(&peer_req->w.list, &device->net_ee);
10430500813fSAndreas Gruenbacher spin_unlock_irq(&device->resource->req_lock);
1044435f0740SLars Ellenberg wake_up(&drbd_pp_wait);
1045b411b363SPhilipp Reisner } else
1046b30ab791SAndreas Gruenbacher drbd_free_peer_req(device, peer_req);
1047b411b363SPhilipp Reisner }
1048b411b363SPhilipp Reisner
1049b411b363SPhilipp Reisner /**
1050b411b363SPhilipp Reisner * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
1051b411b363SPhilipp Reisner * @w: work object.
1052b411b363SPhilipp Reisner * @cancel: The connection will be closed anyways
1053b411b363SPhilipp Reisner */
105499920dc5SAndreas Gruenbacher int w_e_end_data_req(struct drbd_work *w, int cancel)
1055b411b363SPhilipp Reisner {
1056a8cd15baSAndreas Gruenbacher struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
10576780139cSAndreas Gruenbacher struct drbd_peer_device *peer_device = peer_req->peer_device;
10586780139cSAndreas Gruenbacher struct drbd_device *device = peer_device->device;
105999920dc5SAndreas Gruenbacher int err;
1060b411b363SPhilipp Reisner
1061b411b363SPhilipp Reisner if (unlikely(cancel)) {
1062b30ab791SAndreas Gruenbacher drbd_free_peer_req(device, peer_req);
1063b30ab791SAndreas Gruenbacher dec_unacked(device);
106499920dc5SAndreas Gruenbacher return 0;
1065b411b363SPhilipp Reisner }
1066b411b363SPhilipp Reisner
1067db830c46SAndreas Gruenbacher if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
10686780139cSAndreas Gruenbacher err = drbd_send_block(peer_device, P_DATA_REPLY, peer_req);
1069b411b363SPhilipp Reisner } else {
1070e3fa02d7SChristoph Böhmwalder if (drbd_ratelimit())
1071d0180171SAndreas Gruenbacher drbd_err(device, "Sending NegDReply. sector=%llus.\n",
1072db830c46SAndreas Gruenbacher (unsigned long long)peer_req->i.sector);
1073b411b363SPhilipp Reisner
10746780139cSAndreas Gruenbacher err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
1075b411b363SPhilipp Reisner }
1076b411b363SPhilipp Reisner
1077b30ab791SAndreas Gruenbacher dec_unacked(device);
1078b411b363SPhilipp Reisner
1079b30ab791SAndreas Gruenbacher move_to_net_ee_or_free(device, peer_req);
1080b411b363SPhilipp Reisner
108199920dc5SAndreas Gruenbacher if (unlikely(err))
1082d0180171SAndreas Gruenbacher drbd_err(device, "drbd_send_block() failed\n");
108399920dc5SAndreas Gruenbacher return err;
1084b411b363SPhilipp Reisner }
1085b411b363SPhilipp Reisner
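/* Scan the peer request's page chain and report whether the payload is all
 * zeroes.  Used below by w_e_end_rsdata_req() to answer thinly provisioned
 * resync requests (EE_RS_THIN_REQ) with P_RS_DEALLOCATED instead of
 * shipping a block full of zeroes. */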
1086700ca8c0SPhilipp Reisner static bool all_zero(struct drbd_peer_request *peer_req)
1087700ca8c0SPhilipp Reisner {
1088700ca8c0SPhilipp Reisner struct page *page = peer_req->pages;
1089700ca8c0SPhilipp Reisner unsigned int len = peer_req->i.size;
1090700ca8c0SPhilipp Reisner
1091700ca8c0SPhilipp Reisner page_chain_for_each(page) {
1092700ca8c0SPhilipp Reisner unsigned int l = min_t(unsigned int, len, PAGE_SIZE);
1093700ca8c0SPhilipp Reisner unsigned int i, words = l / sizeof(long);
1094700ca8c0SPhilipp Reisner unsigned long *d;
1095700ca8c0SPhilipp Reisner
1096700ca8c0SPhilipp Reisner d = kmap_atomic(page);
1097700ca8c0SPhilipp Reisner for (i = 0; i < words; i++) {
1098700ca8c0SPhilipp Reisner if (d[i]) {
1099700ca8c0SPhilipp Reisner kunmap_atomic(d);
1100700ca8c0SPhilipp Reisner return false;
1101700ca8c0SPhilipp Reisner }
1102700ca8c0SPhilipp Reisner }
1103700ca8c0SPhilipp Reisner kunmap_atomic(d);
1104700ca8c0SPhilipp Reisner len -= l;
1105700ca8c0SPhilipp Reisner }
1106700ca8c0SPhilipp Reisner
1107700ca8c0SPhilipp Reisner return true;
1108700ca8c0SPhilipp Reisner }
1109700ca8c0SPhilipp Reisner
1110b411b363SPhilipp Reisner /**
1111a209b4aeSAndreas Gruenbacher * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
1112b411b363SPhilipp Reisner * @w: work object.
1113b411b363SPhilipp Reisner * @cancel: The connection will be closed anyways
1114b411b363SPhilipp Reisner */
111599920dc5SAndreas Gruenbacher int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
1116b411b363SPhilipp Reisner {
1117a8cd15baSAndreas Gruenbacher struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
11186780139cSAndreas Gruenbacher struct drbd_peer_device *peer_device = peer_req->peer_device;
11196780139cSAndreas Gruenbacher struct drbd_device *device = peer_device->device;
112099920dc5SAndreas Gruenbacher int err;
1121b411b363SPhilipp Reisner
1122b411b363SPhilipp Reisner if (unlikely(cancel)) {
1123b30ab791SAndreas Gruenbacher drbd_free_peer_req(device, peer_req);
1124b30ab791SAndreas Gruenbacher dec_unacked(device);
112599920dc5SAndreas Gruenbacher return 0;
1126b411b363SPhilipp Reisner }
1127b411b363SPhilipp Reisner
1128b30ab791SAndreas Gruenbacher if (get_ldev_if_state(device, D_FAILED)) {
1129b30ab791SAndreas Gruenbacher drbd_rs_complete_io(device, peer_req->i.sector);
1130b30ab791SAndreas Gruenbacher put_ldev(device);
1131b411b363SPhilipp Reisner }
1132b411b363SPhilipp Reisner
1133b30ab791SAndreas Gruenbacher if (device->state.conn == C_AHEAD) {
11346780139cSAndreas Gruenbacher err = drbd_send_ack(peer_device, P_RS_CANCEL, peer_req);
1135db830c46SAndreas Gruenbacher } else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1136b30ab791SAndreas Gruenbacher if (likely(device->state.pdsk >= D_INCONSISTENT)) {
11370d11f3cfSChristoph Böhmwalder inc_rs_pending(peer_device);
1138700ca8c0SPhilipp Reisner if (peer_req->flags & EE_RS_THIN_REQ && all_zero(peer_req))
1139700ca8c0SPhilipp Reisner err = drbd_send_rs_deallocated(peer_device, peer_req);
1140700ca8c0SPhilipp Reisner else
11416780139cSAndreas Gruenbacher err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
1142b411b363SPhilipp Reisner } else {
1143e3fa02d7SChristoph Böhmwalder if (drbd_ratelimit())
1144d0180171SAndreas Gruenbacher drbd_err(device, "Not sending RSDataReply, "
1145b411b363SPhilipp Reisner "partner DISKLESS!\n");
114699920dc5SAndreas Gruenbacher err = 0;
1147b411b363SPhilipp Reisner }
1148b411b363SPhilipp Reisner } else {
1149e3fa02d7SChristoph Böhmwalder if (drbd_ratelimit())
1150d0180171SAndreas Gruenbacher drbd_err(device, "Sending NegRSDReply. sector %llus.\n",
1151db830c46SAndreas Gruenbacher (unsigned long long)peer_req->i.sector);
1152b411b363SPhilipp Reisner
11536780139cSAndreas Gruenbacher err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
1154b411b363SPhilipp Reisner
1155b411b363SPhilipp Reisner /* update resync data with failure */
11560d11f3cfSChristoph Böhmwalder drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size);
1157b411b363SPhilipp Reisner }
1158b411b363SPhilipp Reisner
1159b30ab791SAndreas Gruenbacher dec_unacked(device);
1160b411b363SPhilipp Reisner
1161b30ab791SAndreas Gruenbacher move_to_net_ee_or_free(device, peer_req);
1162b411b363SPhilipp Reisner
116399920dc5SAndreas Gruenbacher if (unlikely(err))
1164d0180171SAndreas Gruenbacher drbd_err(device, "drbd_send_block() failed\n");
116599920dc5SAndreas Gruenbacher return err;
1166b411b363SPhilipp Reisner }
1167b411b363SPhilipp Reisner
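/**
 * w_e_end_csum_rs_req() - Worker callback to answer a checksum based resync request
 * @w: work object.
 * @cancel: The connection will be closed anyways
 *
 * Compares the digest sent by the peer with one computed over the local
 * data: on a match only P_RS_IS_IN_SYNC is acked, otherwise the full block
 * is sent back as P_RS_DATA_REPLY.
 */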
116899920dc5SAndreas Gruenbacher int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
1169b411b363SPhilipp Reisner {
1170a8cd15baSAndreas Gruenbacher struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
11716780139cSAndreas Gruenbacher struct drbd_peer_device *peer_device = peer_req->peer_device;
11726780139cSAndreas Gruenbacher struct drbd_device *device = peer_device->device;
1173b411b363SPhilipp Reisner struct digest_info *di;
1174b411b363SPhilipp Reisner int digest_size;
1175b411b363SPhilipp Reisner void *digest = NULL;
117699920dc5SAndreas Gruenbacher int err, eq = 0;
1177b411b363SPhilipp Reisner
1178b411b363SPhilipp Reisner if (unlikely(cancel)) {
1179b30ab791SAndreas Gruenbacher drbd_free_peer_req(device, peer_req);
1180b30ab791SAndreas Gruenbacher dec_unacked(device);
118199920dc5SAndreas Gruenbacher return 0;
1182b411b363SPhilipp Reisner }
1183b411b363SPhilipp Reisner
1184b30ab791SAndreas Gruenbacher if (get_ldev(device)) {
1185b30ab791SAndreas Gruenbacher drbd_rs_complete_io(device, peer_req->i.sector);
1186b30ab791SAndreas Gruenbacher put_ldev(device);
11871d53f09eSLars Ellenberg }
1188b411b363SPhilipp Reisner
1189db830c46SAndreas Gruenbacher di = peer_req->digest;
1190b411b363SPhilipp Reisner
1191db830c46SAndreas Gruenbacher if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1192b411b363SPhilipp Reisner /* quick hack to try to avoid a race against reconfiguration.
1193b411b363SPhilipp Reisner * a real fix would be much more involved,
1194b411b363SPhilipp Reisner * introducing more locking mechanisms */
11956780139cSAndreas Gruenbacher if (peer_device->connection->csums_tfm) {
11963d0e6375SKees Cook digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
11970b0ba1efSAndreas Gruenbacher D_ASSERT(device, digest_size == di->digest_size);
1198b411b363SPhilipp Reisner digest = kmalloc(digest_size, GFP_NOIO);
1199b411b363SPhilipp Reisner }
1200b411b363SPhilipp Reisner if (digest) {
12016780139cSAndreas Gruenbacher drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
1202b411b363SPhilipp Reisner eq = !memcmp(digest, di->digest, digest_size);
1203b411b363SPhilipp Reisner kfree(digest);
1204b411b363SPhilipp Reisner }
1205b411b363SPhilipp Reisner
1206b411b363SPhilipp Reisner if (eq) {
12070d11f3cfSChristoph Böhmwalder drbd_set_in_sync(peer_device, peer_req->i.sector, peer_req->i.size);
1208676396d5SLars Ellenberg /* rs_same_csums unit is BM_BLOCK_SIZE */
1209b30ab791SAndreas Gruenbacher device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
12106780139cSAndreas Gruenbacher err = drbd_send_ack(peer_device, P_RS_IS_IN_SYNC, peer_req);
1211b411b363SPhilipp Reisner } else {
12120d11f3cfSChristoph Böhmwalder inc_rs_pending(peer_device);
1213db830c46SAndreas Gruenbacher peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
1214db830c46SAndreas Gruenbacher peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
1215204bba99SPhilipp Reisner kfree(di);
12166780139cSAndreas Gruenbacher err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
1217b411b363SPhilipp Reisner }
1218b411b363SPhilipp Reisner } else {
12196780139cSAndreas Gruenbacher err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
1220e3fa02d7SChristoph Böhmwalder if (drbd_ratelimit())
1221d0180171SAndreas Gruenbacher drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
1222b411b363SPhilipp Reisner }
1223b411b363SPhilipp Reisner
1224b30ab791SAndreas Gruenbacher dec_unacked(device);
1225b30ab791SAndreas Gruenbacher move_to_net_ee_or_free(device, peer_req);
1226b411b363SPhilipp Reisner
122799920dc5SAndreas Gruenbacher if (unlikely(err))
1228d0180171SAndreas Gruenbacher drbd_err(device, "drbd_send_block/ack() failed\n");
122999920dc5SAndreas Gruenbacher return err;
1230b411b363SPhilipp Reisner }
1231b411b363SPhilipp Reisner
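/**
 * w_e_end_ov_req() - Worker callback to compute a digest over local data and send it as P_OV_REPLY
 * @w: work object.
 * @cancel: The connection will be closed anyways
 */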
123299920dc5SAndreas Gruenbacher int w_e_end_ov_req(struct drbd_work *w, int cancel)
1233b411b363SPhilipp Reisner {
1234a8cd15baSAndreas Gruenbacher struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
12356780139cSAndreas Gruenbacher struct drbd_peer_device *peer_device = peer_req->peer_device;
12366780139cSAndreas Gruenbacher struct drbd_device *device = peer_device->device;
1237db830c46SAndreas Gruenbacher sector_t sector = peer_req->i.sector;
1238db830c46SAndreas Gruenbacher unsigned int size = peer_req->i.size;
1239b411b363SPhilipp Reisner int digest_size;
1240b411b363SPhilipp Reisner void *digest;
124199920dc5SAndreas Gruenbacher int err = 0;
1242b411b363SPhilipp Reisner
1243b411b363SPhilipp Reisner if (unlikely(cancel))
1244b411b363SPhilipp Reisner goto out;
1245b411b363SPhilipp Reisner
12463d0e6375SKees Cook digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
1247b411b363SPhilipp Reisner digest = kmalloc(digest_size, GFP_NOIO);
12488f21420eSPhilipp Reisner if (!digest) {
124999920dc5SAndreas Gruenbacher err = 1; /* terminate the connection in case the allocation failed */
12508f21420eSPhilipp Reisner goto out;
12518f21420eSPhilipp Reisner }
12528f21420eSPhilipp Reisner
1253db830c46SAndreas Gruenbacher if (likely(!(peer_req->flags & EE_WAS_ERROR)))
12546780139cSAndreas Gruenbacher drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
12558f21420eSPhilipp Reisner else
12568f21420eSPhilipp Reisner memset(digest, 0, digest_size);
12578f21420eSPhilipp Reisner
125853ea4331SLars Ellenberg /* Free e and pages before send.
125953ea4331SLars Ellenberg * In case we block on congestion, we could otherwise run into
126053ea4331SLars Ellenberg * some distributed deadlock, if the other side blocks on
126153ea4331SLars Ellenberg * congestion as well, because our receiver blocks in
1262c37c8ecfSAndreas Gruenbacher * drbd_alloc_pages due to pp_in_use > max_buffers. */
1263b30ab791SAndreas Gruenbacher drbd_free_peer_req(device, peer_req);
1264db830c46SAndreas Gruenbacher peer_req = NULL;
12650d11f3cfSChristoph Böhmwalder inc_rs_pending(peer_device);
12666780139cSAndreas Gruenbacher err = drbd_send_drequest_csum(peer_device, sector, size, digest, digest_size, P_OV_REPLY);
126799920dc5SAndreas Gruenbacher if (err)
12680d11f3cfSChristoph Böhmwalder dec_rs_pending(peer_device);
1269b411b363SPhilipp Reisner kfree(digest);
1270b411b363SPhilipp Reisner
1271b411b363SPhilipp Reisner out:
1272db830c46SAndreas Gruenbacher if (peer_req)
1273b30ab791SAndreas Gruenbacher drbd_free_peer_req(device, peer_req);
1274b30ab791SAndreas Gruenbacher dec_unacked(device);
127599920dc5SAndreas Gruenbacher return err;
1276b411b363SPhilipp Reisner }
1277b411b363SPhilipp Reisner
12780d11f3cfSChristoph Böhmwalder void drbd_ov_out_of_sync_found(struct drbd_peer_device *peer_device, sector_t sector, int size)
1279b411b363SPhilipp Reisner {
12800d11f3cfSChristoph Böhmwalder struct drbd_device *device = peer_device->device;
1281b30ab791SAndreas Gruenbacher if (device->ov_last_oos_start + device->ov_last_oos_size == sector) {
1282b30ab791SAndreas Gruenbacher device->ov_last_oos_size += size>>9;
1283b411b363SPhilipp Reisner } else {
1284b30ab791SAndreas Gruenbacher device->ov_last_oos_start = sector;
1285b30ab791SAndreas Gruenbacher device->ov_last_oos_size = size>>9;
1286b411b363SPhilipp Reisner }
12870d11f3cfSChristoph Böhmwalder drbd_set_out_of_sync(peer_device, sector, size);
1288b411b363SPhilipp Reisner }
1289b411b363SPhilipp Reisner
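/**
 * w_e_end_ov_reply() - Worker callback to compare the peer's verify digest with local data
 * @w: work object.
 * @cancel: The connection will be closed anyways
 *
 * Reports the result as P_OV_RESULT, records out-of-sync ranges, and
 * finishes the verify run once the last block or the stop sector is reached.
 */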
129099920dc5SAndreas Gruenbacher int w_e_end_ov_reply(struct drbd_work *w, int cancel)
1291b411b363SPhilipp Reisner {
1292a8cd15baSAndreas Gruenbacher struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
12936780139cSAndreas Gruenbacher struct drbd_peer_device *peer_device = peer_req->peer_device;
12946780139cSAndreas Gruenbacher struct drbd_device *device = peer_device->device;
1295b411b363SPhilipp Reisner struct digest_info *di;
1296b411b363SPhilipp Reisner void *digest;
1297db830c46SAndreas Gruenbacher sector_t sector = peer_req->i.sector;
1298db830c46SAndreas Gruenbacher unsigned int size = peer_req->i.size;
129953ea4331SLars Ellenberg int digest_size;
130099920dc5SAndreas Gruenbacher int err, eq = 0;
130158ffa580SLars Ellenberg bool stop_sector_reached = false;
1302b411b363SPhilipp Reisner
1303b411b363SPhilipp Reisner if (unlikely(cancel)) {
1304b30ab791SAndreas Gruenbacher drbd_free_peer_req(device, peer_req);
1305b30ab791SAndreas Gruenbacher dec_unacked(device);
130699920dc5SAndreas Gruenbacher return 0;
1307b411b363SPhilipp Reisner }
1308b411b363SPhilipp Reisner
1309b411b363SPhilipp Reisner /* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
1310b411b363SPhilipp Reisner * the resync lru has been cleaned up already */
1311b30ab791SAndreas Gruenbacher if (get_ldev(device)) {
1312b30ab791SAndreas Gruenbacher drbd_rs_complete_io(device, peer_req->i.sector);
1313b30ab791SAndreas Gruenbacher put_ldev(device);
13141d53f09eSLars Ellenberg }
1315b411b363SPhilipp Reisner
1316db830c46SAndreas Gruenbacher di = peer_req->digest;
1317b411b363SPhilipp Reisner
1318db830c46SAndreas Gruenbacher if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
13193d0e6375SKees Cook digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
1320b411b363SPhilipp Reisner digest = kmalloc(digest_size, GFP_NOIO);
1321b411b363SPhilipp Reisner if (digest) {
13226780139cSAndreas Gruenbacher drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
1323b411b363SPhilipp Reisner
13240b0ba1efSAndreas Gruenbacher D_ASSERT(device, digest_size == di->digest_size);
1325b411b363SPhilipp Reisner eq = !memcmp(digest, di->digest, digest_size);
1326b411b363SPhilipp Reisner kfree(digest);
1327b411b363SPhilipp Reisner }
1328b411b363SPhilipp Reisner }
1329b411b363SPhilipp Reisner
13309676c760SLars Ellenberg /* Free peer_req and pages before send.
133153ea4331SLars Ellenberg * In case we block on congestion, we could otherwise run into
133253ea4331SLars Ellenberg * some distributed deadlock, if the other side blocks on
133353ea4331SLars Ellenberg * congestion as well, because our receiver blocks in
1334c37c8ecfSAndreas Gruenbacher * drbd_alloc_pages due to pp_in_use > max_buffers. */
1335b30ab791SAndreas Gruenbacher drbd_free_peer_req(device, peer_req);
1336b411b363SPhilipp Reisner if (!eq)
13370d11f3cfSChristoph Böhmwalder drbd_ov_out_of_sync_found(peer_device, sector, size);
1338b411b363SPhilipp Reisner else
13390d11f3cfSChristoph Böhmwalder ov_out_of_sync_print(peer_device);
1340b411b363SPhilipp Reisner
13416780139cSAndreas Gruenbacher err = drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size,
1342b411b363SPhilipp Reisner eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
1343b411b363SPhilipp Reisner
1344b30ab791SAndreas Gruenbacher dec_unacked(device);
1345b411b363SPhilipp Reisner
1346b30ab791SAndreas Gruenbacher --device->ov_left;
1347ea5442afSLars Ellenberg
1348ea5442afSLars Ellenberg /* let's advance progress step marks only for every other megabyte */
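	/* (ov_left counts bitmap bits; with the usual 4 KiB BM_BLOCK_SIZE,
	 * bit 9 flips every 512 bits, i.e. every 2 MiB of verify progress) */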
1349b30ab791SAndreas Gruenbacher if ((device->ov_left & 0x200) == 0x200)
13500d11f3cfSChristoph Böhmwalder drbd_advance_rs_marks(peer_device, device->ov_left);
1351ea5442afSLars Ellenberg
1352b30ab791SAndreas Gruenbacher stop_sector_reached = verify_can_do_stop_sector(device) &&
1353b30ab791SAndreas Gruenbacher (sector + (size>>9)) >= device->ov_stop_sector;
135458ffa580SLars Ellenberg
1355b30ab791SAndreas Gruenbacher if (device->ov_left == 0 || stop_sector_reached) {
13560d11f3cfSChristoph Böhmwalder ov_out_of_sync_print(peer_device);
13570d11f3cfSChristoph Böhmwalder drbd_resync_finished(peer_device);
1358b411b363SPhilipp Reisner }
1359b411b363SPhilipp Reisner
136099920dc5SAndreas Gruenbacher return err;
1361b411b363SPhilipp Reisner }
1362b411b363SPhilipp Reisner
1363b6dd1a89SLars Ellenberg /* FIXME
1364b6dd1a89SLars Ellenberg * We need to track the number of pending barrier acks,
1365b6dd1a89SLars Ellenberg * and to be able to wait for them.
1366b6dd1a89SLars Ellenberg * See also comment in drbd_adm_attach before drbd_suspend_io.
1367b6dd1a89SLars Ellenberg */
1368bde89a9eSAndreas Gruenbacher static int drbd_send_barrier(struct drbd_connection *connection)
1369b411b363SPhilipp Reisner {
13709f5bdc33SAndreas Gruenbacher struct p_barrier *p;
1371b6dd1a89SLars Ellenberg struct drbd_socket *sock;
1372b411b363SPhilipp Reisner
1373bde89a9eSAndreas Gruenbacher sock = &connection->data;
1374bde89a9eSAndreas Gruenbacher p = conn_prepare_command(connection, sock);
13759f5bdc33SAndreas Gruenbacher if (!p)
13769f5bdc33SAndreas Gruenbacher return -EIO;
1377bde89a9eSAndreas Gruenbacher p->barrier = connection->send.current_epoch_nr;
1378b6dd1a89SLars Ellenberg p->pad = 0;
1379bde89a9eSAndreas Gruenbacher connection->send.current_epoch_writes = 0;
138084d34f2fSLars Ellenberg connection->send.last_sent_barrier_jif = jiffies;
1381b6dd1a89SLars Ellenberg
1382bde89a9eSAndreas Gruenbacher return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
1383b411b363SPhilipp Reisner }
1384b411b363SPhilipp Reisner
1385c51a0ef3SLars Ellenberg static int pd_send_unplug_remote(struct drbd_peer_device *pd)
1386c51a0ef3SLars Ellenberg {
1387c51a0ef3SLars Ellenberg struct drbd_socket *sock = &pd->connection->data;
1388c51a0ef3SLars Ellenberg if (!drbd_prepare_command(pd, sock))
1389c51a0ef3SLars Ellenberg return -EIO;
1390c51a0ef3SLars Ellenberg return drbd_send_command(pd, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
1391c51a0ef3SLars Ellenberg }
1392c51a0ef3SLars Ellenberg
139399920dc5SAndreas Gruenbacher int w_send_write_hint(struct drbd_work *w, int cancel)
1394b411b363SPhilipp Reisner {
139584b8c06bSAndreas Gruenbacher struct drbd_device *device =
139684b8c06bSAndreas Gruenbacher container_of(w, struct drbd_device, unplug_work);
13979f5bdc33SAndreas Gruenbacher
1398b411b363SPhilipp Reisner if (cancel)
139999920dc5SAndreas Gruenbacher return 0;
1400c51a0ef3SLars Ellenberg return pd_send_unplug_remote(first_peer_device(device));
1401b411b363SPhilipp Reisner }
1402b411b363SPhilipp Reisner
1403bde89a9eSAndreas Gruenbacher static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch)
14044eb9b3cbSLars Ellenberg {
1405bde89a9eSAndreas Gruenbacher if (!connection->send.seen_any_write_yet) {
1406bde89a9eSAndreas Gruenbacher connection->send.seen_any_write_yet = true;
1407bde89a9eSAndreas Gruenbacher connection->send.current_epoch_nr = epoch;
1408bde89a9eSAndreas Gruenbacher connection->send.current_epoch_writes = 0;
140984d34f2fSLars Ellenberg connection->send.last_sent_barrier_jif = jiffies;
14104eb9b3cbSLars Ellenberg }
14114eb9b3cbSLars Ellenberg }
14124eb9b3cbSLars Ellenberg
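/* Close the previous epoch with a P_BARRIER if @epoch starts a new one and
 * the old epoch actually carried writes; a no-op before the first write. */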
1413bde89a9eSAndreas Gruenbacher static void maybe_send_barrier(struct drbd_connection *connection, unsigned int epoch)
14144eb9b3cbSLars Ellenberg {
14154eb9b3cbSLars Ellenberg /* re-init if first write on this connection */
1416bde89a9eSAndreas Gruenbacher if (!connection->send.seen_any_write_yet)
14174eb9b3cbSLars Ellenberg return;
1418bde89a9eSAndreas Gruenbacher if (connection->send.current_epoch_nr != epoch) {
1419bde89a9eSAndreas Gruenbacher if (connection->send.current_epoch_writes)
1420bde89a9eSAndreas Gruenbacher drbd_send_barrier(connection);
1421bde89a9eSAndreas Gruenbacher connection->send.current_epoch_nr = epoch;
14224eb9b3cbSLars Ellenberg }
14234eb9b3cbSLars Ellenberg }
14244eb9b3cbSLars Ellenberg
14258f7bed77SAndreas Gruenbacher int w_send_out_of_sync(struct drbd_work *w, int cancel)
142673a01a18SPhilipp Reisner {
142773a01a18SPhilipp Reisner struct drbd_request *req = container_of(w, struct drbd_request, w);
142884b8c06bSAndreas Gruenbacher struct drbd_device *device = req->device;
142944a4d551SLars Ellenberg struct drbd_peer_device *const peer_device = first_peer_device(device);
143044a4d551SLars Ellenberg struct drbd_connection *const connection = peer_device->connection;
143199920dc5SAndreas Gruenbacher int err;
143273a01a18SPhilipp Reisner
143373a01a18SPhilipp Reisner if (unlikely(cancel)) {
1434ad878a0dSChristoph Böhmwalder req_mod(req, SEND_CANCELED, peer_device);
143599920dc5SAndreas Gruenbacher return 0;
143673a01a18SPhilipp Reisner }
1437e5f891b2SLars Ellenberg req->pre_send_jif = jiffies;
143873a01a18SPhilipp Reisner
1439bde89a9eSAndreas Gruenbacher /* this time, no connection->send.current_epoch_writes++;
1440b6dd1a89SLars Ellenberg * If it was sent, it was the closing barrier for the last
1441b6dd1a89SLars Ellenberg * replicated epoch, before we went into AHEAD mode.
1442b6dd1a89SLars Ellenberg * No more barriers will be sent, until we leave AHEAD mode again. */
1443bde89a9eSAndreas Gruenbacher maybe_send_barrier(connection, req->epoch);
1444b6dd1a89SLars Ellenberg
144544a4d551SLars Ellenberg err = drbd_send_out_of_sync(peer_device, req);
1446ad878a0dSChristoph Böhmwalder req_mod(req, OOS_HANDED_TO_NETWORK, peer_device);
144773a01a18SPhilipp Reisner
144899920dc5SAndreas Gruenbacher return err;
144973a01a18SPhilipp Reisner }
145073a01a18SPhilipp Reisner
1451b411b363SPhilipp Reisner /**
1452b411b363SPhilipp Reisner * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
1453b411b363SPhilipp Reisner * @w: work object.
1454b411b363SPhilipp Reisner * @cancel: The connection will be closed anyways
1455b411b363SPhilipp Reisner */
145699920dc5SAndreas Gruenbacher int w_send_dblock(struct drbd_work *w, int cancel)
1457b411b363SPhilipp Reisner {
1458b411b363SPhilipp Reisner struct drbd_request *req = container_of(w, struct drbd_request, w);
145984b8c06bSAndreas Gruenbacher struct drbd_device *device = req->device;
146044a4d551SLars Ellenberg struct drbd_peer_device *const peer_device = first_peer_device(device);
146144a4d551SLars Ellenberg struct drbd_connection *connection = peer_device->connection;
1462c51a0ef3SLars Ellenberg bool do_send_unplug = req->rq_state & RQ_UNPLUG;
146399920dc5SAndreas Gruenbacher int err;
1464b411b363SPhilipp Reisner
1465b411b363SPhilipp Reisner if (unlikely(cancel)) {
1466ad878a0dSChristoph Böhmwalder req_mod(req, SEND_CANCELED, peer_device);
146799920dc5SAndreas Gruenbacher return 0;
1468b411b363SPhilipp Reisner }
1469e5f891b2SLars Ellenberg req->pre_send_jif = jiffies;
1470b411b363SPhilipp Reisner
1471bde89a9eSAndreas Gruenbacher re_init_if_first_write(connection, req->epoch);
1472bde89a9eSAndreas Gruenbacher maybe_send_barrier(connection, req->epoch);
1473bde89a9eSAndreas Gruenbacher connection->send.current_epoch_writes++;
1474b6dd1a89SLars Ellenberg
147544a4d551SLars Ellenberg err = drbd_send_dblock(peer_device, req);
1476ad878a0dSChristoph Böhmwalder req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK, peer_device);
1477b411b363SPhilipp Reisner
1478c51a0ef3SLars Ellenberg if (do_send_unplug && !err)
1479c51a0ef3SLars Ellenberg pd_send_unplug_remote(peer_device);
1480c51a0ef3SLars Ellenberg
148199920dc5SAndreas Gruenbacher return err;
1482b411b363SPhilipp Reisner }
1483b411b363SPhilipp Reisner
1484b411b363SPhilipp Reisner /**
1485b411b363SPhilipp Reisner * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
1486b411b363SPhilipp Reisner * @w: work object.
1487b411b363SPhilipp Reisner * @cancel: The connection will be closed anyways
1488b411b363SPhilipp Reisner */
148999920dc5SAndreas Gruenbacher int w_send_read_req(struct drbd_work *w, int cancel)
1490b411b363SPhilipp Reisner {
1491b411b363SPhilipp Reisner struct drbd_request *req = container_of(w, struct drbd_request, w);
149284b8c06bSAndreas Gruenbacher struct drbd_device *device = req->device;
149344a4d551SLars Ellenberg struct drbd_peer_device *const peer_device = first_peer_device(device);
149444a4d551SLars Ellenberg struct drbd_connection *connection = peer_device->connection;
1495c51a0ef3SLars Ellenberg bool do_send_unplug = req->rq_state & RQ_UNPLUG;
149699920dc5SAndreas Gruenbacher int err;
1497b411b363SPhilipp Reisner
1498b411b363SPhilipp Reisner if (unlikely(cancel)) {
1499ad878a0dSChristoph Böhmwalder req_mod(req, SEND_CANCELED, peer_device);
150099920dc5SAndreas Gruenbacher return 0;
1501b411b363SPhilipp Reisner }
1502e5f891b2SLars Ellenberg req->pre_send_jif = jiffies;
1503b411b363SPhilipp Reisner
1504b6dd1a89SLars Ellenberg /* Even read requests may close a write epoch,
1505b6dd1a89SLars Ellenberg * if there has been one yet. */
1506bde89a9eSAndreas Gruenbacher maybe_send_barrier(connection, req->epoch);
1507b6dd1a89SLars Ellenberg
150844a4d551SLars Ellenberg err = drbd_send_drequest(peer_device, P_DATA_REQUEST, req->i.sector, req->i.size,
1509b411b363SPhilipp Reisner (unsigned long)req);
1510b411b363SPhilipp Reisner
1511ad878a0dSChristoph Böhmwalder req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK, peer_device);
1512b411b363SPhilipp Reisner
1513c51a0ef3SLars Ellenberg if (do_send_unplug && !err)
1514c51a0ef3SLars Ellenberg pd_send_unplug_remote(peer_device);
1515c51a0ef3SLars Ellenberg
151699920dc5SAndreas Gruenbacher return err;
1517b411b363SPhilipp Reisner }
1518b411b363SPhilipp Reisner
151999920dc5SAndreas Gruenbacher int w_restart_disk_io(struct drbd_work *w, int cancel)
1520265be2d0SPhilipp Reisner {
1521265be2d0SPhilipp Reisner struct drbd_request *req = container_of(w, struct drbd_request, w);
152284b8c06bSAndreas Gruenbacher struct drbd_device *device = req->device;
1523265be2d0SPhilipp Reisner
15240778286aSPhilipp Reisner if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
15254dd726f0SLars Ellenberg drbd_al_begin_io(device, &req->i);
1526265be2d0SPhilipp Reisner
1527abfc426dSChristoph Hellwig req->private_bio = bio_alloc_clone(device->ldev->backing_bdev,
1528abfc426dSChristoph Hellwig req->master_bio, GFP_NOIO,
1529ae7153f1SChristoph Hellwig &drbd_io_bio_set);
1530ae7153f1SChristoph Hellwig req->private_bio->bi_private = req;
1531ae7153f1SChristoph Hellwig req->private_bio->bi_end_io = drbd_request_endio;
1532ed00aabdSChristoph Hellwig submit_bio_noacct(req->private_bio);
1533265be2d0SPhilipp Reisner
153499920dc5SAndreas Gruenbacher return 0;
1535265be2d0SPhilipp Reisner }
1536265be2d0SPhilipp Reisner
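/* Walk the resync-after dependency chain of @device: return 1 if no device
 * we (transitively) depend on is currently resyncing or paused, 0 otherwise. */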
1537b30ab791SAndreas Gruenbacher static int _drbd_may_sync_now(struct drbd_device *device)
1538b411b363SPhilipp Reisner {
1539b30ab791SAndreas Gruenbacher struct drbd_device *odev = device;
154095f8efd0SAndreas Gruenbacher int resync_after;
1541b411b363SPhilipp Reisner
1542b411b363SPhilipp Reisner while (1) {
1543a3f8f7dcSLars Ellenberg if (!odev->ldev || odev->state.disk == D_DISKLESS)
1544438c8374SPhilipp Reisner return 1;
1545daeda1ccSPhilipp Reisner rcu_read_lock();
154695f8efd0SAndreas Gruenbacher resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
1547daeda1ccSPhilipp Reisner rcu_read_unlock();
154895f8efd0SAndreas Gruenbacher if (resync_after == -1)
1549b411b363SPhilipp Reisner return 1;
1550b30ab791SAndreas Gruenbacher odev = minor_to_device(resync_after);
1551a3f8f7dcSLars Ellenberg if (!odev)
1552841ce241SAndreas Gruenbacher return 1;
1553b411b363SPhilipp Reisner if ((odev->state.conn >= C_SYNC_SOURCE &&
1554b411b363SPhilipp Reisner odev->state.conn <= C_PAUSED_SYNC_T) ||
1555b411b363SPhilipp Reisner odev->state.aftr_isp || odev->state.peer_isp ||
1556b411b363SPhilipp Reisner odev->state.user_isp)
1557b411b363SPhilipp Reisner return 0;
1558b411b363SPhilipp Reisner }
1559b411b363SPhilipp Reisner }
1560b411b363SPhilipp Reisner
1561b411b363SPhilipp Reisner /**
156228bc3b8cSAndreas Gruenbacher * drbd_pause_after() - Pause resync on all devices that may not resync now
1563b30ab791SAndreas Gruenbacher * @device: DRBD device.
1564b411b363SPhilipp Reisner *
1565b411b363SPhilipp Reisner * Called from process context only (admin command and after_state_ch).
1566b411b363SPhilipp Reisner */
156728bc3b8cSAndreas Gruenbacher static bool drbd_pause_after(struct drbd_device *device)
1568b411b363SPhilipp Reisner {
156928bc3b8cSAndreas Gruenbacher bool changed = false;
157054761697SAndreas Gruenbacher struct drbd_device *odev;
157128bc3b8cSAndreas Gruenbacher int i;
1572b411b363SPhilipp Reisner
1573695d08faSPhilipp Reisner rcu_read_lock();
157405a10ec7SAndreas Gruenbacher idr_for_each_entry(&drbd_devices, odev, i) {
1575b411b363SPhilipp Reisner if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1576b411b363SPhilipp Reisner continue;
157728bc3b8cSAndreas Gruenbacher if (!_drbd_may_sync_now(odev) &&
157828bc3b8cSAndreas Gruenbacher _drbd_set_state(_NS(odev, aftr_isp, 1),
157928bc3b8cSAndreas Gruenbacher CS_HARD, NULL) != SS_NOTHING_TO_DO)
158028bc3b8cSAndreas Gruenbacher changed = true;
1581b411b363SPhilipp Reisner }
1582695d08faSPhilipp Reisner rcu_read_unlock();
1583b411b363SPhilipp Reisner
158428bc3b8cSAndreas Gruenbacher return changed;
1585b411b363SPhilipp Reisner }
1586b411b363SPhilipp Reisner
1587b411b363SPhilipp Reisner /**
158828bc3b8cSAndreas Gruenbacher * drbd_resume_next() - Resume resync on all devices that may resync now
1589b30ab791SAndreas Gruenbacher * @device: DRBD device.
1590b411b363SPhilipp Reisner *
1591b411b363SPhilipp Reisner * Called from process context only (admin command and worker).
1592b411b363SPhilipp Reisner */
159328bc3b8cSAndreas Gruenbacher static bool drbd_resume_next(struct drbd_device *device)
1594b411b363SPhilipp Reisner {
159528bc3b8cSAndreas Gruenbacher bool changed = false;
159654761697SAndreas Gruenbacher struct drbd_device *odev;
159728bc3b8cSAndreas Gruenbacher int i;
1598b411b363SPhilipp Reisner
1599695d08faSPhilipp Reisner rcu_read_lock();
160005a10ec7SAndreas Gruenbacher idr_for_each_entry(&drbd_devices, odev, i) {
1601b411b363SPhilipp Reisner if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1602b411b363SPhilipp Reisner continue;
1603b411b363SPhilipp Reisner if (odev->state.aftr_isp) {
160428bc3b8cSAndreas Gruenbacher if (_drbd_may_sync_now(odev) &&
160528bc3b8cSAndreas Gruenbacher _drbd_set_state(_NS(odev, aftr_isp, 0),
160628bc3b8cSAndreas Gruenbacher CS_HARD, NULL) != SS_NOTHING_TO_DO)
160728bc3b8cSAndreas Gruenbacher changed = true;
1608b411b363SPhilipp Reisner }
1609b411b363SPhilipp Reisner }
1610695d08faSPhilipp Reisner rcu_read_unlock();
161128bc3b8cSAndreas Gruenbacher return changed;
1612b411b363SPhilipp Reisner }
1613b411b363SPhilipp Reisner
1614b30ab791SAndreas Gruenbacher void resume_next_sg(struct drbd_device *device)
1615b411b363SPhilipp Reisner {
161628bc3b8cSAndreas Gruenbacher lock_all_resources();
161728bc3b8cSAndreas Gruenbacher drbd_resume_next(device);
161828bc3b8cSAndreas Gruenbacher unlock_all_resources();
1619b411b363SPhilipp Reisner }
1620b411b363SPhilipp Reisner
1621b30ab791SAndreas Gruenbacher void suspend_other_sg(struct drbd_device *device)
1622b411b363SPhilipp Reisner {
162328bc3b8cSAndreas Gruenbacher lock_all_resources();
162428bc3b8cSAndreas Gruenbacher drbd_pause_after(device);
162528bc3b8cSAndreas Gruenbacher unlock_all_resources();
1626b411b363SPhilipp Reisner }
1627b411b363SPhilipp Reisner
162828bc3b8cSAndreas Gruenbacher /* caller must lock_all_resources() */
1629b30ab791SAndreas Gruenbacher enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor)
1630b411b363SPhilipp Reisner {
163154761697SAndreas Gruenbacher struct drbd_device *odev;
163295f8efd0SAndreas Gruenbacher int resync_after;
1633b411b363SPhilipp Reisner
1634b411b363SPhilipp Reisner if (o_minor == -1)
1635b411b363SPhilipp Reisner return NO_ERROR;
1636a3f8f7dcSLars Ellenberg if (o_minor < -1 || o_minor > MINORMASK)
163795f8efd0SAndreas Gruenbacher return ERR_RESYNC_AFTER;
1638b411b363SPhilipp Reisner
1639b411b363SPhilipp Reisner /* check for loops */
1640b30ab791SAndreas Gruenbacher odev = minor_to_device(o_minor);
1641b411b363SPhilipp Reisner while (1) {
1642b30ab791SAndreas Gruenbacher if (odev == device)
164395f8efd0SAndreas Gruenbacher return ERR_RESYNC_AFTER_CYCLE;
1644b411b363SPhilipp Reisner
1645a3f8f7dcSLars Ellenberg /* You are free to depend on diskless, non-existing,
1646a3f8f7dcSLars Ellenberg * or not yet/no longer existing minors.
1647a3f8f7dcSLars Ellenberg * We only reject dependency loops.
1648a3f8f7dcSLars Ellenberg * We cannot follow the dependency chain beyond a detached or
1649a3f8f7dcSLars Ellenberg * missing minor.
1650a3f8f7dcSLars Ellenberg */
1651a3f8f7dcSLars Ellenberg if (!odev || !odev->ldev || odev->state.disk == D_DISKLESS)
1652a3f8f7dcSLars Ellenberg return NO_ERROR;
1653a3f8f7dcSLars Ellenberg
1654daeda1ccSPhilipp Reisner rcu_read_lock();
165595f8efd0SAndreas Gruenbacher resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
1656daeda1ccSPhilipp Reisner rcu_read_unlock();
1657b411b363SPhilipp Reisner /* dependency chain ends here, no cycles. */
165895f8efd0SAndreas Gruenbacher if (resync_after == -1)
1659b411b363SPhilipp Reisner return NO_ERROR;
1660b411b363SPhilipp Reisner
1661b411b363SPhilipp Reisner /* follow the dependency chain */
1662b30ab791SAndreas Gruenbacher odev = minor_to_device(resync_after);
1663b411b363SPhilipp Reisner }
1664b411b363SPhilipp Reisner }
1665b411b363SPhilipp Reisner
166628bc3b8cSAndreas Gruenbacher /* caller must lock_all_resources() */
1667b30ab791SAndreas Gruenbacher void drbd_resync_after_changed(struct drbd_device *device)
1668b411b363SPhilipp Reisner {
166928bc3b8cSAndreas Gruenbacher int changed;
1670b411b363SPhilipp Reisner
1671b411b363SPhilipp Reisner do {
167228bc3b8cSAndreas Gruenbacher changed = drbd_pause_after(device);
167328bc3b8cSAndreas Gruenbacher changed |= drbd_resume_next(device);
167428bc3b8cSAndreas Gruenbacher } while (changed);
1675b411b363SPhilipp Reisner }
1676b411b363SPhilipp Reisner
16770d11f3cfSChristoph Böhmwalder void drbd_rs_controller_reset(struct drbd_peer_device *peer_device)
16789bd28d3cSLars Ellenberg {
16790d11f3cfSChristoph Böhmwalder struct drbd_device *device = peer_device->device;
16808c40c7c4SChristoph Hellwig struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
1681813472ceSPhilipp Reisner struct fifo_buffer *plan;
1682813472ceSPhilipp Reisner
1683b30ab791SAndreas Gruenbacher atomic_set(&device->rs_sect_in, 0);
1684b30ab791SAndreas Gruenbacher atomic_set(&device->rs_sect_ev, 0);
1685b30ab791SAndreas Gruenbacher device->rs_in_flight = 0;
1686cb8432d6SChristoph Hellwig device->rs_last_events =
16878446fe92SChristoph Hellwig (int)part_stat_read_accum(disk->part0, sectors);
1688813472ceSPhilipp Reisner
1689813472ceSPhilipp Reisner /* Updating the RCU protected object in place is necessary since
1690813472ceSPhilipp Reisner this function gets called from atomic context.
1691813472ceSPhilipp Reisner It is valid since all other updates also lead to a completely
1692813472ceSPhilipp Reisner empty fifo */
1693813472ceSPhilipp Reisner rcu_read_lock();
1694b30ab791SAndreas Gruenbacher plan = rcu_dereference(device->rs_plan_s);
1695813472ceSPhilipp Reisner plan->total = 0;
1696813472ceSPhilipp Reisner fifo_set(plan, 0);
1697813472ceSPhilipp Reisner rcu_read_unlock();
16989bd28d3cSLars Ellenberg }
16999bd28d3cSLars Ellenberg
17002bccef39SKees Cook void start_resync_timer_fn(struct timer_list *t)
17011f04af33SPhilipp Reisner {
1702*41cb0855SIngo Molnar struct drbd_device *device = timer_container_of(device, t,
1703*41cb0855SIngo Molnar start_resync_timer);
1704ac0acb9eSLars Ellenberg drbd_device_post_work(device, RS_START);
17051f04af33SPhilipp Reisner }
17061f04af33SPhilipp Reisner
1707ac0acb9eSLars Ellenberg static void do_start_resync(struct drbd_device *device)
17081f04af33SPhilipp Reisner {
1709b30ab791SAndreas Gruenbacher if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) {
1710ac0acb9eSLars Ellenberg drbd_warn(device, "postponing start_resync ...\n");
1711b30ab791SAndreas Gruenbacher device->start_resync_timer.expires = jiffies + HZ/10;
1712b30ab791SAndreas Gruenbacher add_timer(&device->start_resync_timer);
1713ac0acb9eSLars Ellenberg return;
17141f04af33SPhilipp Reisner }
17151f04af33SPhilipp Reisner
1716b30ab791SAndreas Gruenbacher drbd_start_resync(device, C_SYNC_SOURCE);
1717b30ab791SAndreas Gruenbacher clear_bit(AHEAD_TO_SYNC_SOURCE, &device->flags);
17181f04af33SPhilipp Reisner }
17191f04af33SPhilipp Reisner
1720aaaba345SLars Ellenberg static bool use_checksum_based_resync(struct drbd_connection *connection, struct drbd_device *device)
1721aaaba345SLars Ellenberg {
1722aaaba345SLars Ellenberg bool csums_after_crash_only;
1723aaaba345SLars Ellenberg rcu_read_lock();
1724aaaba345SLars Ellenberg csums_after_crash_only = rcu_dereference(connection->net_conf)->csums_after_crash_only;
1725aaaba345SLars Ellenberg rcu_read_unlock();
1726aaaba345SLars Ellenberg return connection->agreed_pro_version >= 89 && /* supported? */
1727aaaba345SLars Ellenberg connection->csums_tfm && /* configured? */
17287e5fec31SFabian Frederick (csums_after_crash_only == false /* use for each resync? */
1729aaaba345SLars Ellenberg || test_bit(CRASHED_PRIMARY, &device->flags)); /* or only after Primary crash? */
1730aaaba345SLars Ellenberg }
1731aaaba345SLars Ellenberg
1732b411b363SPhilipp Reisner /**
1733b411b363SPhilipp Reisner * drbd_start_resync() - Start the resync process
1734b30ab791SAndreas Gruenbacher * @device: DRBD device.
1735b411b363SPhilipp Reisner * @side: Either C_SYNC_SOURCE or C_SYNC_TARGET
1736b411b363SPhilipp Reisner *
1737b411b363SPhilipp Reisner * This function might bring you directly into one of the
1738b411b363SPhilipp Reisner * C_PAUSED_SYNC_* states.
1739b411b363SPhilipp Reisner */
1740b30ab791SAndreas Gruenbacher void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1741b411b363SPhilipp Reisner {
174244a4d551SLars Ellenberg struct drbd_peer_device *peer_device = first_peer_device(device);
174344a4d551SLars Ellenberg struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
1744b411b363SPhilipp Reisner union drbd_state ns;
1745b411b363SPhilipp Reisner int r;
1746b411b363SPhilipp Reisner
1747b30ab791SAndreas Gruenbacher if (device->state.conn >= C_SYNC_SOURCE && device->state.conn < C_AHEAD) {
1748d0180171SAndreas Gruenbacher drbd_err(device, "Resync already running!\n");
1749b411b363SPhilipp Reisner return;
1750b411b363SPhilipp Reisner }
1751b411b363SPhilipp Reisner
1752d3d2948fSRoland Kammerer if (!connection) {
1753d3d2948fSRoland Kammerer drbd_err(device, "No connection to peer, aborting!\n");
1754d3d2948fSRoland Kammerer return;
1755d3d2948fSRoland Kammerer }
1756d3d2948fSRoland Kammerer
1757b30ab791SAndreas Gruenbacher if (!test_bit(B_RS_H_DONE, &device->flags)) {
1758b411b363SPhilipp Reisner if (side == C_SYNC_TARGET) {
1759b411b363SPhilipp Reisner /* Since application IO was locked out during C_WF_BITMAP_T and
1760b411b363SPhilipp Reisner C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
1761b411b363SPhilipp Reisner we check whether we are allowed to make the data inconsistent. */
1762b30ab791SAndreas Gruenbacher r = drbd_khelper(device, "before-resync-target");
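			/* drbd_khelper() hands back the usermode helper's
			 * wait()-style status; keep only the exit code. */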
1763b411b363SPhilipp Reisner r = (r >> 8) & 0xff;
1764b411b363SPhilipp Reisner if (r > 0) {
1765d0180171SAndreas Gruenbacher drbd_info(device, "before-resync-target handler returned %d, "
1766b411b363SPhilipp Reisner "dropping connection.\n", r);
176744a4d551SLars Ellenberg conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
1768b411b363SPhilipp Reisner return;
1769b411b363SPhilipp Reisner }
177009b9e797SPhilipp Reisner } else /* C_SYNC_SOURCE */ {
1771b30ab791SAndreas Gruenbacher r = drbd_khelper(device, "before-resync-source");
177209b9e797SPhilipp Reisner r = (r >> 8) & 0xff;
177309b9e797SPhilipp Reisner if (r > 0) {
177409b9e797SPhilipp Reisner if (r == 3) {
1775d0180171SAndreas Gruenbacher drbd_info(device, "before-resync-source handler returned %d, "
177609b9e797SPhilipp Reisner "ignoring. Old userland tools?", r);
177709b9e797SPhilipp Reisner } else {
1778d0180171SAndreas Gruenbacher drbd_info(device, "before-resync-source handler returned %d, "
177909b9e797SPhilipp Reisner "dropping connection.\n", r);
178044a4d551SLars Ellenberg conn_request_state(connection,
1781a6b32bc3SAndreas Gruenbacher NS(conn, C_DISCONNECTING), CS_HARD);
178209b9e797SPhilipp Reisner return;
178309b9e797SPhilipp Reisner }
178409b9e797SPhilipp Reisner }
1785b411b363SPhilipp Reisner }
1786e64a3294SPhilipp Reisner }
1787b411b363SPhilipp Reisner
178844a4d551SLars Ellenberg if (current == connection->worker.task) {
1789dad20554SPhilipp Reisner /* The worker should not sleep waiting for state_mutex,
1790e64a3294SPhilipp Reisner that can take long */
1791b30ab791SAndreas Gruenbacher if (!mutex_trylock(device->state_mutex)) {
1792b30ab791SAndreas Gruenbacher set_bit(B_RS_H_DONE, &device->flags);
1793b30ab791SAndreas Gruenbacher device->start_resync_timer.expires = jiffies + HZ/5;
1794b30ab791SAndreas Gruenbacher add_timer(&device->start_resync_timer);
1795e64a3294SPhilipp Reisner return;
1796e64a3294SPhilipp Reisner }
1797e64a3294SPhilipp Reisner } else {
1798b30ab791SAndreas Gruenbacher mutex_lock(device->state_mutex);
1799e64a3294SPhilipp Reisner }
1800b411b363SPhilipp Reisner
180128bc3b8cSAndreas Gruenbacher lock_all_resources();
180228bc3b8cSAndreas Gruenbacher clear_bit(B_RS_H_DONE, &device->flags);
1803a700471bSPhilipp Reisner /* Did some connection breakage or IO error race with us? */
1804b30ab791SAndreas Gruenbacher if (device->state.conn < C_CONNECTED
1805b30ab791SAndreas Gruenbacher || !get_ldev_if_state(device, D_NEGOTIATING)) {
180628bc3b8cSAndreas Gruenbacher unlock_all_resources();
180728bc3b8cSAndreas Gruenbacher goto out;
1808b411b363SPhilipp Reisner }
1809b411b363SPhilipp Reisner
1810b30ab791SAndreas Gruenbacher ns = drbd_read_state(device);
1811b411b363SPhilipp Reisner
1812b30ab791SAndreas Gruenbacher ns.aftr_isp = !_drbd_may_sync_now(device);
1813b411b363SPhilipp Reisner
1814b411b363SPhilipp Reisner ns.conn = side;
1815b411b363SPhilipp Reisner
1816b411b363SPhilipp Reisner if (side == C_SYNC_TARGET)
1817b411b363SPhilipp Reisner ns.disk = D_INCONSISTENT;
1818b411b363SPhilipp Reisner else /* side == C_SYNC_SOURCE */
1819b411b363SPhilipp Reisner ns.pdsk = D_INCONSISTENT;
1820b411b363SPhilipp Reisner
182128bc3b8cSAndreas Gruenbacher r = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
1822b30ab791SAndreas Gruenbacher ns = drbd_read_state(device);
1823b411b363SPhilipp Reisner
1824b411b363SPhilipp Reisner if (ns.conn < C_CONNECTED)
1825b411b363SPhilipp Reisner r = SS_UNKNOWN_ERROR;
1826b411b363SPhilipp Reisner
1827b411b363SPhilipp Reisner if (r == SS_SUCCESS) {
1828b30ab791SAndreas Gruenbacher unsigned long tw = drbd_bm_total_weight(device);
18291d7734a0SLars Ellenberg unsigned long now = jiffies;
18301d7734a0SLars Ellenberg int i;
18311d7734a0SLars Ellenberg
1832b30ab791SAndreas Gruenbacher device->rs_failed = 0;
1833b30ab791SAndreas Gruenbacher device->rs_paused = 0;
1834b30ab791SAndreas Gruenbacher device->rs_same_csum = 0;
1835b30ab791SAndreas Gruenbacher device->rs_last_sect_ev = 0;
1836b30ab791SAndreas Gruenbacher device->rs_total = tw;
1837b30ab791SAndreas Gruenbacher device->rs_start = now;
18381d7734a0SLars Ellenberg for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1839b30ab791SAndreas Gruenbacher device->rs_mark_left[i] = tw;
1840b30ab791SAndreas Gruenbacher device->rs_mark_time[i] = now;
18411d7734a0SLars Ellenberg }
184228bc3b8cSAndreas Gruenbacher drbd_pause_after(device);
18435ab7d2c0SLars Ellenberg /* Forget potentially stale cached per resync extent bit-counts.
18445ab7d2c0SLars Ellenberg * Open coded drbd_rs_cancel_all(device), we already have IRQs
18455ab7d2c0SLars Ellenberg * disabled, and know the disk state is ok. */
18465ab7d2c0SLars Ellenberg spin_lock(&device->al_lock);
18475ab7d2c0SLars Ellenberg lc_reset(device->resync);
18485ab7d2c0SLars Ellenberg device->resync_locked = 0;
18495ab7d2c0SLars Ellenberg device->resync_wenr = LC_FREE;
18505ab7d2c0SLars Ellenberg spin_unlock(&device->al_lock);
1851b411b363SPhilipp Reisner }
185228bc3b8cSAndreas Gruenbacher unlock_all_resources();
18535a22db89SLars Ellenberg
18546c922ed5SLars Ellenberg if (r == SS_SUCCESS) {
18555ab7d2c0SLars Ellenberg wake_up(&device->al_wait); /* for lc_reset() above */
1856328e0f12SPhilipp Reisner /* reset rs_last_bcast when a resync or verify is started,
1857328e0f12SPhilipp Reisner * to deal with potential jiffies wrap. */
1858b30ab791SAndreas Gruenbacher device->rs_last_bcast = jiffies - HZ;
1859328e0f12SPhilipp Reisner
1860d0180171SAndreas Gruenbacher drbd_info(device, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
18616c922ed5SLars Ellenberg drbd_conn_str(ns.conn),
1862b30ab791SAndreas Gruenbacher (unsigned long) device->rs_total << (BM_BLOCK_SHIFT-10),
1863b30ab791SAndreas Gruenbacher (unsigned long) device->rs_total);
1864aaaba345SLars Ellenberg if (side == C_SYNC_TARGET) {
1865b30ab791SAndreas Gruenbacher device->bm_resync_fo = 0;
1866aaaba345SLars Ellenberg device->use_csums = use_checksum_based_resync(connection, device);
1867aaaba345SLars Ellenberg } else {
18687e5fec31SFabian Frederick device->use_csums = false;
1869aaaba345SLars Ellenberg }
18705a22db89SLars Ellenberg
18715a22db89SLars Ellenberg /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
18725a22db89SLars Ellenberg * with w_send_oos, or the sync target will get confused as to
18735a22db89SLars Ellenberg * how much bits to resync. We cannot do that always, because for an
18745a22db89SLars Ellenberg * empty resync and protocol < 95, we need to do it here, as we call
18755a22db89SLars Ellenberg * drbd_resync_finished from here in that case.
18765a22db89SLars Ellenberg * We drbd_gen_and_send_sync_uuid here for protocol < 96,
18775a22db89SLars Ellenberg * and from after_state_ch otherwise. */
187844a4d551SLars Ellenberg if (side == C_SYNC_SOURCE && connection->agreed_pro_version < 96)
187944a4d551SLars Ellenberg drbd_gen_and_send_sync_uuid(peer_device);
1880b411b363SPhilipp Reisner
188144a4d551SLars Ellenberg if (connection->agreed_pro_version < 95 && device->rs_total == 0) {
1882af85e8e8SLars Ellenberg /* This still has a race (about when exactly the peers
1883af85e8e8SLars Ellenberg * detect connection loss) that can lead to a full sync
1884af85e8e8SLars Ellenberg * on next handshake. In 8.3.9 we fixed this with explicit
1885af85e8e8SLars Ellenberg * resync-finished notifications, but the fix
1886af85e8e8SLars Ellenberg * introduces a protocol change. Sleeping for some
1887af85e8e8SLars Ellenberg * time longer than the ping interval + timeout on the
1888af85e8e8SLars Ellenberg * SyncSource, to give the SyncTarget the chance to
1889af85e8e8SLars Ellenberg * detect connection loss, then waiting for a ping
1890af85e8e8SLars Ellenberg * response (implicit in drbd_resync_finished) reduces
1891af85e8e8SLars Ellenberg * the race considerably, but does not solve it. */
189244ed167dSPhilipp Reisner if (side == C_SYNC_SOURCE) {
189344ed167dSPhilipp Reisner struct net_conf *nc;
189444ed167dSPhilipp Reisner int timeo;
189544ed167dSPhilipp Reisner
189644ed167dSPhilipp Reisner rcu_read_lock();
189744a4d551SLars Ellenberg nc = rcu_dereference(connection->net_conf);
189844ed167dSPhilipp Reisner timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
189944ed167dSPhilipp Reisner rcu_read_unlock();
190044ed167dSPhilipp Reisner schedule_timeout_interruptible(timeo);
190144ed167dSPhilipp Reisner }
19020d11f3cfSChristoph Böhmwalder drbd_resync_finished(peer_device);
1903b411b363SPhilipp Reisner }
1904b411b363SPhilipp Reisner
19050d11f3cfSChristoph Böhmwalder drbd_rs_controller_reset(peer_device);
1906b30ab791SAndreas Gruenbacher /* ns.conn may already be != device->state.conn,
1907b411b363SPhilipp Reisner * we may have been paused in between, or become paused until
1908b411b363SPhilipp Reisner * the timer triggers.
1909b411b363SPhilipp Reisner * No matter, that is handled in resync_timer_fn() */
1910b411b363SPhilipp Reisner if (ns.conn == C_SYNC_TARGET)
1911b30ab791SAndreas Gruenbacher mod_timer(&device->resync_timer, jiffies);
1912b411b363SPhilipp Reisner
1913b30ab791SAndreas Gruenbacher drbd_md_sync(device);
1914b411b363SPhilipp Reisner }
1915b30ab791SAndreas Gruenbacher put_ldev(device);
191628bc3b8cSAndreas Gruenbacher out:
1917b30ab791SAndreas Gruenbacher mutex_unlock(device->state_mutex);
1918b411b363SPhilipp Reisner }
1919b411b363SPhilipp Reisner
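/* Lazily write out changed bitmap pages and broadcast sync progress
 * (SIB_SYNC_PROGRESS); if the resync just finished, also wrap it up via
 * drbd_resync_finished(). */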
19200d11f3cfSChristoph Böhmwalder static void update_on_disk_bitmap(struct drbd_peer_device *peer_device, bool resync_done)
1921c7a58db4SLars Ellenberg {
19220d11f3cfSChristoph Böhmwalder struct drbd_device *device = peer_device->device;
1923c7a58db4SLars Ellenberg struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
1924c7a58db4SLars Ellenberg device->rs_last_bcast = jiffies;
1925c7a58db4SLars Ellenberg
1926c7a58db4SLars Ellenberg if (!get_ldev(device))
1927c7a58db4SLars Ellenberg return;
1928c7a58db4SLars Ellenberg
1929c7a58db4SLars Ellenberg drbd_bm_write_lazy(device, 0);
19305ab7d2c0SLars Ellenberg if (resync_done && is_sync_state(device->state.conn))
19310d11f3cfSChristoph Böhmwalder drbd_resync_finished(peer_device);
19325ab7d2c0SLars Ellenberg
1933c7a58db4SLars Ellenberg drbd_bcast_event(device, &sib);
1934c7a58db4SLars Ellenberg /* update timestamp, in case it took a while to write out stuff */
1935c7a58db4SLars Ellenberg device->rs_last_bcast = jiffies;
1936c7a58db4SLars Ellenberg put_ldev(device);
1937c7a58db4SLars Ellenberg }
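/*
 * Note that rs_last_bcast is set twice in update_on_disk_bitmap(): once
 * on entry and once after drbd_bm_write_lazy(), so the broadcast
 * timestamp stays accurate even when writing out the bitmap pages took
 * a while, as the in-function comment already hints.
 */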
1938c7a58db4SLars Ellenberg
1939e334f550SLars Ellenberg static void drbd_ldev_destroy(struct drbd_device *device)
1940e334f550SLars Ellenberg {
1941e334f550SLars Ellenberg lc_destroy(device->resync);
1942e334f550SLars Ellenberg device->resync = NULL;
1943e334f550SLars Ellenberg lc_destroy(device->act_log);
1944e334f550SLars Ellenberg device->act_log = NULL;
1945d1b80853SAndreas Gruenbacher
1946d1b80853SAndreas Gruenbacher __acquire(local);
194763a7c8adSLars Ellenberg drbd_backing_dev_free(device, device->ldev);
1948d1b80853SAndreas Gruenbacher device->ldev = NULL;
1949d1b80853SAndreas Gruenbacher __release(local);
1950d1b80853SAndreas Gruenbacher
1951e334f550SLars Ellenberg clear_bit(GOING_DISKLESS, &device->flags);
1952e334f550SLars Ellenberg wake_up(&device->misc_wait);
1953e334f550SLars Ellenberg }
1954e334f550SLars Ellenberg
1955e334f550SLars Ellenberg static void go_diskless(struct drbd_device *device)
1956e334f550SLars Ellenberg {
19578164dd6cSAndreas Gruenbacher struct drbd_peer_device *peer_device = first_peer_device(device);
1958e334f550SLars Ellenberg D_ASSERT(device, device->state.disk == D_FAILED);
1959e334f550SLars Ellenberg /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
1960e334f550SLars Ellenberg * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
1961e334f550SLars Ellenberg * the protected members anymore, though, so once put_ldev reaches zero
1962e334f550SLars Ellenberg * again, it will be safe to free them. */
1963e334f550SLars Ellenberg
1964e334f550SLars Ellenberg /* Try to write changed bitmap pages, read errors may have just
1965e334f550SLars Ellenberg * set some bits outside the area covered by the activity log.
1966e334f550SLars Ellenberg *
1967e334f550SLars Ellenberg * If we have an IO error during the bitmap writeout,
1968e334f550SLars Ellenberg * we will want a full sync next time, just in case.
1969e334f550SLars Ellenberg * (Do we want a specific meta data flag for this?)
1970e334f550SLars Ellenberg *
1971e334f550SLars Ellenberg * If that does not make it to stable storage either,
1972e334f550SLars Ellenberg * we cannot do anything about that anymore.
1973e334f550SLars Ellenberg *
1974e334f550SLars Ellenberg * We still need to check if both bitmap and ldev are present, we may
1975e334f550SLars Ellenberg * end up here after a failed attach, before ldev was even assigned.
1976e334f550SLars Ellenberg */
1977e334f550SLars Ellenberg if (device->bitmap && device->ldev) {
1978e334f550SLars Ellenberg /* An interrupted resync or similar is allowed to recount bits
1979e334f550SLars Ellenberg * while we detach.
1980e334f550SLars Ellenberg * Any modifications would not be expected anymore, though.
1981e334f550SLars Ellenberg */
1982e334f550SLars Ellenberg if (drbd_bitmap_io_from_worker(device, drbd_bm_write,
19838164dd6cSAndreas Gruenbacher "detach", BM_LOCKED_TEST_ALLOWED, peer_device)) {
1984e334f550SLars Ellenberg if (test_bit(WAS_READ_ERROR, &device->flags)) {
1985e334f550SLars Ellenberg drbd_md_set_flag(device, MDF_FULL_SYNC);
1986e334f550SLars Ellenberg drbd_md_sync(device);
1987e334f550SLars Ellenberg }
1988e334f550SLars Ellenberg }
1989e334f550SLars Ellenberg }
1990e334f550SLars Ellenberg
1991e334f550SLars Ellenberg drbd_force_state(device, NS(disk, D_DISKLESS));
1992e334f550SLars Ellenberg }
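/*
 * The "local_cnt" mentioned in go_diskless() above is the reference
 * count taken by get_ldev()/get_ldev_if_state() and dropped by
 * put_ldev().  A minimal sketch of the pattern, as used in
 * update_on_disk_bitmap() further up; only once the count drops to
 * zero while D_DISKLESS is it safe for drbd_ldev_destroy() to free the
 * protected members (device->resync, device->act_log, device->ldev):
 *
 *	if (!get_ldev(device))
 *		return;
 *	... access device->ldev and friends ...
 *	put_ldev(device);
 */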
1993e334f550SLars Ellenberg
1994ac0acb9eSLars Ellenberg static int do_md_sync(struct drbd_device *device)
1995ac0acb9eSLars Ellenberg {
1996ac0acb9eSLars Ellenberg drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
1997ac0acb9eSLars Ellenberg drbd_md_sync(device);
1998ac0acb9eSLars Ellenberg return 0;
1999ac0acb9eSLars Ellenberg }
2000ac0acb9eSLars Ellenberg
2001944410e9SLars Ellenberg /* only called from drbd_worker thread, no locking */
2002944410e9SLars Ellenberg void __update_timing_details(
2003944410e9SLars Ellenberg struct drbd_thread_timing_details *tdp,
2004944410e9SLars Ellenberg unsigned int *cb_nr,
2005944410e9SLars Ellenberg void *cb,
2006944410e9SLars Ellenberg const char *fn, const unsigned int line)
2007944410e9SLars Ellenberg {
2008944410e9SLars Ellenberg unsigned int i = *cb_nr % DRBD_THREAD_DETAILS_HIST;
2009944410e9SLars Ellenberg struct drbd_thread_timing_details *td = tdp + i;
2010944410e9SLars Ellenberg
2011944410e9SLars Ellenberg td->start_jif = jiffies;
2012944410e9SLars Ellenberg td->cb_addr = cb;
2013944410e9SLars Ellenberg td->caller_fn = fn;
2014944410e9SLars Ellenberg td->line = line;
2015944410e9SLars Ellenberg td->cb_nr = *cb_nr;
2016944410e9SLars Ellenberg
2017944410e9SLars Ellenberg i = (i+1) % DRBD_THREAD_DETAILS_HIST;
2018944410e9SLars Ellenberg td = tdp + i;
2019944410e9SLars Ellenberg memset(td, 0, sizeof(*td));
2020944410e9SLars Ellenberg
2021944410e9SLars Ellenberg ++(*cb_nr);
2022944410e9SLars Ellenberg }
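/*
 * Callers go through update_worker_timing_details() (see drbd_worker()
 * below), which presumably wraps this helper with the call site's
 * __func__ and __LINE__.  A sketch of what such a wrapper could look
 * like -- the actual definition and field names live in drbd_int.h and
 * are assumptions here:
 *
 *	#define update_worker_timing_details(c, cb) \
 *		__update_timing_details(c->w_timing_details, &c->w_cb_nr, \
 *					cb, __func__, __LINE__)
 *
 * The history itself is a small ring buffer of DRBD_THREAD_DETAILS_HIST
 * entries; the slot after the one just written is zeroed so a reader
 * can tell where the newest entry ends.
 */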
2023944410e9SLars Ellenberg
2024e334f550SLars Ellenberg static void do_device_work(struct drbd_device *device, const unsigned long todo)
2025e334f550SLars Ellenberg {
2026b47a06d1SAndreas Gruenbacher if (test_bit(MD_SYNC, &todo))
2027ac0acb9eSLars Ellenberg do_md_sync(device);
2028b47a06d1SAndreas Gruenbacher if (test_bit(RS_DONE, &todo) ||
2029b47a06d1SAndreas Gruenbacher test_bit(RS_PROGRESS, &todo))
20300d11f3cfSChristoph Böhmwalder update_on_disk_bitmap(first_peer_device(device), test_bit(RS_DONE, &todo));
2031b47a06d1SAndreas Gruenbacher if (test_bit(GO_DISKLESS, &todo))
2032e334f550SLars Ellenberg go_diskless(device);
2033b47a06d1SAndreas Gruenbacher if (test_bit(DESTROY_DISK, &todo))
2034e334f550SLars Ellenberg drbd_ldev_destroy(device);
2035b47a06d1SAndreas Gruenbacher if (test_bit(RS_START, &todo))
2036ac0acb9eSLars Ellenberg do_start_resync(device);
2037e334f550SLars Ellenberg }
2038e334f550SLars Ellenberg
2039e334f550SLars Ellenberg #define DRBD_DEVICE_WORK_MASK \
2040e334f550SLars Ellenberg ((1UL << GO_DISKLESS) \
2041e334f550SLars Ellenberg |(1UL << DESTROY_DISK) \
2042ac0acb9eSLars Ellenberg |(1UL << MD_SYNC) \
2043ac0acb9eSLars Ellenberg |(1UL << RS_START) \
2044e334f550SLars Ellenberg |(1UL << RS_PROGRESS) \
2045e334f550SLars Ellenberg |(1UL << RS_DONE) \
2046e334f550SLars Ellenberg )
2047e334f550SLars Ellenberg
2048e334f550SLars Ellenberg static unsigned long get_work_bits(unsigned long *flags)
2049e334f550SLars Ellenberg {
2050e334f550SLars Ellenberg unsigned long old, new;
2051e334f550SLars Ellenberg do {
2052e334f550SLars Ellenberg old = *flags;
2053e334f550SLars Ellenberg new = old & ~DRBD_DEVICE_WORK_MASK;
2054e334f550SLars Ellenberg } while (cmpxchg(flags, old, new) != old);
2055e334f550SLars Ellenberg return old & DRBD_DEVICE_WORK_MASK;
2056e334f550SLars Ellenberg }
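/*
 * Illustrative example of the lockless fetch-and-clear above: if
 * device->flags currently has, say, MD_SYNC and RS_PROGRESS set
 * alongside unrelated flag bits, get_work_bits() returns
 * (1UL << MD_SYNC) | (1UL << RS_PROGRESS) and clears exactly those two
 * bits, leaving the unrelated bits untouched.  The cmpxchg() loop
 * retries if another context changed the flags word in between, so a
 * bit set concurrently is never lost.  do_unqueued_work() below hands
 * the returned mask to do_device_work(), which may therefore handle
 * several kinds of device work in one pass.
 */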
2057e334f550SLars Ellenberg
2058e334f550SLars Ellenberg static void do_unqueued_work(struct drbd_connection *connection)
2059c7a58db4SLars Ellenberg {
2060c7a58db4SLars Ellenberg struct drbd_peer_device *peer_device;
2061c7a58db4SLars Ellenberg int vnr;
2062c7a58db4SLars Ellenberg
2063c7a58db4SLars Ellenberg rcu_read_lock();
2064c7a58db4SLars Ellenberg idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2065c7a58db4SLars Ellenberg struct drbd_device *device = peer_device->device;
2066e334f550SLars Ellenberg unsigned long todo = get_work_bits(&device->flags);
2067e334f550SLars Ellenberg if (!todo)
2068c7a58db4SLars Ellenberg continue;
20695ab7d2c0SLars Ellenberg
2070c7a58db4SLars Ellenberg kref_get(&device->kref);
2071c7a58db4SLars Ellenberg rcu_read_unlock();
2072e334f550SLars Ellenberg do_device_work(device, todo);
2073c7a58db4SLars Ellenberg kref_put(&device->kref, drbd_destroy_device);
2074c7a58db4SLars Ellenberg rcu_read_lock();
2075c7a58db4SLars Ellenberg }
2076c7a58db4SLars Ellenberg rcu_read_unlock();
2077c7a58db4SLars Ellenberg }
2078c7a58db4SLars Ellenberg
2079a186e478SRashika Kheria static bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
20808c0785a5SLars Ellenberg {
20818c0785a5SLars Ellenberg spin_lock_irq(&queue->q_lock);
208215e26f6aSLars Ellenberg list_splice_tail_init(&queue->q, work_list);
20838c0785a5SLars Ellenberg spin_unlock_irq(&queue->q_lock);
20848c0785a5SLars Ellenberg return !list_empty(work_list);
20858c0785a5SLars Ellenberg }
20868c0785a5SLars Ellenberg
2087bde89a9eSAndreas Gruenbacher static void wait_for_work(struct drbd_connection *connection, struct list_head *work_list)
2088b6dd1a89SLars Ellenberg {
2089b6dd1a89SLars Ellenberg DEFINE_WAIT(wait);
2090b6dd1a89SLars Ellenberg struct net_conf *nc;
2091b6dd1a89SLars Ellenberg int uncork, cork;
2092b6dd1a89SLars Ellenberg
2093abde9cc6SLars Ellenberg dequeue_work_batch(&connection->sender_work, work_list);
2094b6dd1a89SLars Ellenberg if (!list_empty(work_list))
2095b6dd1a89SLars Ellenberg return;
2096b6dd1a89SLars Ellenberg
2097b6dd1a89SLars Ellenberg /* Still nothing to do?
2098b6dd1a89SLars Ellenberg * Maybe we still need to close the current epoch,
2099b6dd1a89SLars Ellenberg * even if no new requests are queued yet.
2100b6dd1a89SLars Ellenberg *
2101b6dd1a89SLars Ellenberg * Also, poke TCP, just in case.
2102b6dd1a89SLars Ellenberg * Then wait for new work (or signal). */
2103b6dd1a89SLars Ellenberg rcu_read_lock();
2104b6dd1a89SLars Ellenberg nc = rcu_dereference(connection->net_conf);
2105b6dd1a89SLars Ellenberg uncork = nc ? nc->tcp_cork : 0;
2106b6dd1a89SLars Ellenberg rcu_read_unlock();
2107b6dd1a89SLars Ellenberg if (uncork) {
2108b6dd1a89SLars Ellenberg mutex_lock(&connection->data.mutex);
2109b6dd1a89SLars Ellenberg if (connection->data.socket)
2110db10538aSChristoph Hellwig tcp_sock_set_cork(connection->data.socket->sk, false);
2111b6dd1a89SLars Ellenberg mutex_unlock(&connection->data.mutex);
2112b6dd1a89SLars Ellenberg }
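/*
 * Uncorking here flushes whatever is still queued on the data socket
 * before we may go to sleep below; the matching re-cork happens after
 * finish_wait() further down, once there is new work to batch up again.
 */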
2113b6dd1a89SLars Ellenberg
2114b6dd1a89SLars Ellenberg for (;;) {
2115b6dd1a89SLars Ellenberg int send_barrier;
2116b6dd1a89SLars Ellenberg prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE);
21170500813fSAndreas Gruenbacher spin_lock_irq(&connection->resource->req_lock);
2118b6dd1a89SLars Ellenberg spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
2119bc317a9eSLars Ellenberg if (!list_empty(&connection->sender_work.q))
21204dd726f0SLars Ellenberg list_splice_tail_init(&connection->sender_work.q, work_list);
2121b6dd1a89SLars Ellenberg spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
2122b6dd1a89SLars Ellenberg if (!list_empty(work_list) || signal_pending(current)) {
21230500813fSAndreas Gruenbacher spin_unlock_irq(&connection->resource->req_lock);
2124b6dd1a89SLars Ellenberg break;
2125b6dd1a89SLars Ellenberg }
2126f9c78128SLars Ellenberg
2127f9c78128SLars Ellenberg /* We found nothing new to do, no to-be-communicated request,
2128f9c78128SLars Ellenberg * no other work item. We may still need to close the last
2129f9c78128SLars Ellenberg * epoch. Next incoming request epoch will be connection ->
2130f9c78128SLars Ellenberg * current transfer log epoch number. If that is different
2131f9c78128SLars Ellenberg * from the epoch of the last request we communicated, it is
2132f9c78128SLars Ellenberg * safe to send the epoch separating barrier now.
2133f9c78128SLars Ellenberg */
2134f9c78128SLars Ellenberg send_barrier =
2135f9c78128SLars Ellenberg atomic_read(&connection->current_tle_nr) !=
2136f9c78128SLars Ellenberg connection->send.current_epoch_nr;
21370500813fSAndreas Gruenbacher spin_unlock_irq(&connection->resource->req_lock);
2138f9c78128SLars Ellenberg
2139f9c78128SLars Ellenberg if (send_barrier)
2140f9c78128SLars Ellenberg maybe_send_barrier(connection,
2141f9c78128SLars Ellenberg connection->send.current_epoch_nr + 1);
21425ab7d2c0SLars Ellenberg
2143e334f550SLars Ellenberg if (test_bit(DEVICE_WORK_PENDING, &connection->flags))
21445ab7d2c0SLars Ellenberg break;
21455ab7d2c0SLars Ellenberg
2146a80ca1aeSLars Ellenberg /* drbd_send() may have called flush_signals() */
2147a80ca1aeSLars Ellenberg if (get_t_state(&connection->worker) != RUNNING)
2148a80ca1aeSLars Ellenberg break;
21495ab7d2c0SLars Ellenberg
2150b6dd1a89SLars Ellenberg schedule();
2151b6dd1a89SLars Ellenberg /* may be woken up for things other than new work, too,
2152b6dd1a89SLars Ellenberg * e.g. if the current epoch got closed,
2153b6dd1a89SLars Ellenberg * in which case we send the barrier above. */
2154b6dd1a89SLars Ellenberg }
2155b6dd1a89SLars Ellenberg finish_wait(&connection->sender_work.q_wait, &wait);
2156b6dd1a89SLars Ellenberg
2157b6dd1a89SLars Ellenberg /* someone may have changed the config while we have been waiting above. */
2158b6dd1a89SLars Ellenberg rcu_read_lock();
2159b6dd1a89SLars Ellenberg nc = rcu_dereference(connection->net_conf);
2160b6dd1a89SLars Ellenberg cork = nc ? nc->tcp_cork : 0;
2161b6dd1a89SLars Ellenberg rcu_read_unlock();
2162b6dd1a89SLars Ellenberg mutex_lock(&connection->data.mutex);
2163b6dd1a89SLars Ellenberg if (connection->data.socket) {
2164b6dd1a89SLars Ellenberg if (cork)
2165db10538aSChristoph Hellwig tcp_sock_set_cork(connection->data.socket->sk, true);
2166b6dd1a89SLars Ellenberg else if (!uncork)
2167db10538aSChristoph Hellwig tcp_sock_set_cork(connection->data.socket->sk, false);
2168b6dd1a89SLars Ellenberg }
2169b6dd1a89SLars Ellenberg mutex_unlock(&connection->data.mutex);
2170b6dd1a89SLars Ellenberg }
2171b6dd1a89SLars Ellenberg
2172b411b363SPhilipp Reisner int drbd_worker(struct drbd_thread *thi)
2173b411b363SPhilipp Reisner {
2174bde89a9eSAndreas Gruenbacher struct drbd_connection *connection = thi->connection;
21756db7e50aSAndreas Gruenbacher struct drbd_work *w = NULL;
2176c06ece6bSAndreas Gruenbacher struct drbd_peer_device *peer_device;
2177b411b363SPhilipp Reisner LIST_HEAD(work_list);
21788c0785a5SLars Ellenberg int vnr;
2179b411b363SPhilipp Reisner
2180e77a0a5cSAndreas Gruenbacher while (get_t_state(thi) == RUNNING) {
218180822284SPhilipp Reisner drbd_thread_current_set_cpu(thi);
2182b411b363SPhilipp Reisner
2183944410e9SLars Ellenberg if (list_empty(&work_list)) {
2184944410e9SLars Ellenberg update_worker_timing_details(connection, wait_for_work);
2185bde89a9eSAndreas Gruenbacher wait_for_work(connection, &work_list);
2186944410e9SLars Ellenberg }
2187b411b363SPhilipp Reisner
2188944410e9SLars Ellenberg if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) {
2189944410e9SLars Ellenberg update_worker_timing_details(connection, do_unqueued_work);
2190e334f550SLars Ellenberg do_unqueued_work(connection);
2191944410e9SLars Ellenberg }
21925ab7d2c0SLars Ellenberg
21938c0785a5SLars Ellenberg if (signal_pending(current)) {
2194b411b363SPhilipp Reisner flush_signals(current);
219519393e10SPhilipp Reisner if (get_t_state(thi) == RUNNING) {
21961ec861ebSAndreas Gruenbacher drbd_warn(connection, "Worker got an unexpected signal\n");
2197b411b363SPhilipp Reisner continue;
219819393e10SPhilipp Reisner }
2199b411b363SPhilipp Reisner break;
2200b411b363SPhilipp Reisner }
2201b411b363SPhilipp Reisner
2202e77a0a5cSAndreas Gruenbacher if (get_t_state(thi) != RUNNING)
2203b411b363SPhilipp Reisner break;
2204b411b363SPhilipp Reisner
2205729e8b87SLars Ellenberg if (!list_empty(&work_list)) {
22066db7e50aSAndreas Gruenbacher w = list_first_entry(&work_list, struct drbd_work, list);
22076db7e50aSAndreas Gruenbacher list_del_init(&w->list);
2208944410e9SLars Ellenberg update_worker_timing_details(connection, w->cb);
22096db7e50aSAndreas Gruenbacher if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0)
22108c0785a5SLars Ellenberg continue;
2211bde89a9eSAndreas Gruenbacher if (connection->cstate >= C_WF_REPORT_PARAMS)
2212bde89a9eSAndreas Gruenbacher conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
2213b411b363SPhilipp Reisner }
2214b411b363SPhilipp Reisner }
2215b411b363SPhilipp Reisner
22168c0785a5SLars Ellenberg do {
2217944410e9SLars Ellenberg if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) {
2218944410e9SLars Ellenberg update_worker_timing_details(connection, do_unqueued_work);
2219e334f550SLars Ellenberg do_unqueued_work(connection);
2220944410e9SLars Ellenberg }
2221729e8b87SLars Ellenberg if (!list_empty(&work_list)) {
22226db7e50aSAndreas Gruenbacher w = list_first_entry(&work_list, struct drbd_work, list);
22236db7e50aSAndreas Gruenbacher list_del_init(&w->list);
2224944410e9SLars Ellenberg update_worker_timing_details(connection, w->cb);
22256db7e50aSAndreas Gruenbacher w->cb(w, 1);
2226729e8b87SLars Ellenberg } else
2227bde89a9eSAndreas Gruenbacher dequeue_work_batch(&connection->sender_work, &work_list);
2228e334f550SLars Ellenberg } while (!list_empty(&work_list) || test_bit(DEVICE_WORK_PENDING, &connection->flags));
2229b411b363SPhilipp Reisner
2230c141ebdaSPhilipp Reisner rcu_read_lock();
2231c06ece6bSAndreas Gruenbacher idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2232c06ece6bSAndreas Gruenbacher struct drbd_device *device = peer_device->device;
22330b0ba1efSAndreas Gruenbacher D_ASSERT(device, device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE);
2234b30ab791SAndreas Gruenbacher kref_get(&device->kref);
2235c141ebdaSPhilipp Reisner rcu_read_unlock();
2236b30ab791SAndreas Gruenbacher drbd_device_cleanup(device);
223705a10ec7SAndreas Gruenbacher kref_put(&device->kref, drbd_destroy_device);
2238c141ebdaSPhilipp Reisner rcu_read_lock();
22390e29d163SPhilipp Reisner }
2240c141ebdaSPhilipp Reisner rcu_read_unlock();
2241b411b363SPhilipp Reisner
2242b411b363SPhilipp Reisner return 0;
2243b411b363SPhilipp Reisner }
2244