1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3    drbd_receiver.c
4 
5    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 
7    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
8    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
9    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 
11  */
12 
13 
14 #include <linux/module.h>
15 
16 #include <linux/uaccess.h>
17 #include <net/sock.h>
18 
19 #include <linux/drbd.h>
20 #include <linux/fs.h>
21 #include <linux/file.h>
22 #include <linux/in.h>
23 #include <linux/mm.h>
24 #include <linux/memcontrol.h>
25 #include <linux/mm_inline.h>
26 #include <linux/slab.h>
27 #include <uapi/linux/sched/types.h>
28 #include <linux/sched/signal.h>
29 #include <linux/pkt_sched.h>
30 #define __KERNEL_SYSCALLS__
31 #include <linux/unistd.h>
32 #include <linux/vmalloc.h>
33 #include <linux/random.h>
34 #include <linux/string.h>
35 #include <linux/scatterlist.h>
36 #include <linux/part_stat.h>
37 #include "drbd_int.h"
38 #include "drbd_protocol.h"
39 #include "drbd_req.h"
40 #include "drbd_vli.h"
41 
42 #define PRO_FEATURES (DRBD_FF_TRIM|DRBD_FF_THIN_RESYNC|DRBD_FF_WSAME|DRBD_FF_WZEROES)
43 
44 struct packet_info {
45 	enum drbd_packet cmd;
46 	unsigned int size;
47 	unsigned int vnr;
48 	void *data;
49 };
50 
51 enum finish_epoch {
52 	FE_STILL_LIVE,
53 	FE_DESTROYED,
54 	FE_RECYCLED,
55 };
56 
57 static int drbd_do_features(struct drbd_connection *connection);
58 static int drbd_do_auth(struct drbd_connection *connection);
59 static int drbd_disconnected(struct drbd_peer_device *);
60 static void conn_wait_active_ee_empty(struct drbd_connection *connection);
61 static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
62 static int e_end_block(struct drbd_work *, int);
63 
64 
65 #define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
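/* Note: GFP_TRY deliberately contains no reclaim flags, so these allocations
 * fail fast instead of triggering write-out; see the comment in
 * __drbd_alloc_pages() below for why that matters. */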
66 
67 /*
68  * some helper functions to deal with singly linked page lists,
69  * page->private being our "next" pointer.
70  */
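/* Illustration: a chain of three pages looks like
 *	*head -> page A -> page B -> page C -> NULL
 * where each "->" is the page_private() field of the previous page,
 * read via page_chain_next() (see drbd_int.h). */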
71 
72 /* If at least n pages are linked at head, get n pages off.
73  * Otherwise, don't modify head, and return NULL.
74  * Locking is the responsibility of the caller.
75  */
76 static struct page *page_chain_del(struct page **head, int n)
77 {
78 	struct page *page;
79 	struct page *tmp;
80 
81 	BUG_ON(!n);
82 	BUG_ON(!head);
83 
84 	page = *head;
85 
86 	if (!page)
87 		return NULL;
88 
89 	while (page) {
90 		tmp = page_chain_next(page);
91 		if (--n == 0)
92 			break; /* found sufficient pages */
93 		if (tmp == NULL)
94 			/* insufficient pages, don't use any of them. */
95 			return NULL;
96 		page = tmp;
97 	}
98 
99 	/* add end of list marker for the returned list */
100 	set_page_private(page, 0);
101 	/* actual return value, and adjustment of head */
102 	page = *head;
103 	*head = tmp;
104 	return page;
105 }
106 
107 /* may be used outside of locks to find the tail of a (usually short)
108  * "private" page chain, before adding it back to a global chain head
109  * with page_chain_add() under a spinlock. */
110 static struct page *page_chain_tail(struct page *page, int *len)
111 {
112 	struct page *tmp;
113 	int i = 1;
114 	while ((tmp = page_chain_next(page))) {
115 		++i;
116 		page = tmp;
117 	}
118 	if (len)
119 		*len = i;
120 	return page;
121 }
122 
123 static int page_chain_free(struct page *page)
124 {
125 	struct page *tmp;
126 	int i = 0;
127 	page_chain_for_each_safe(page, tmp) {
128 		put_page(page);
129 		++i;
130 	}
131 	return i;
132 }
133 
134 static void page_chain_add(struct page **head,
135 		struct page *chain_first, struct page *chain_last)
136 {
137 #if 1
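	/* Paranoia check: verify that chain_last really is the tail of the
	 * chain starting at chain_first. */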
138 	struct page *tmp;
139 	tmp = page_chain_tail(chain_first, NULL);
140 	BUG_ON(tmp != chain_last);
141 #endif
142 
143 	/* add chain to head */
144 	set_page_private(chain_last, (unsigned long)*head);
145 	*head = chain_first;
146 }
147 
148 static struct page *__drbd_alloc_pages(struct drbd_device *device,
149 				       unsigned int number)
150 {
151 	struct page *page = NULL;
152 	struct page *tmp = NULL;
153 	unsigned int i = 0;
154 
155 	/* Yes, testing drbd_pp_vacant outside the lock is racy.
156 	 * So what. It saves a spin_lock. */
157 	if (drbd_pp_vacant >= number) {
158 		spin_lock(&drbd_pp_lock);
159 		page = page_chain_del(&drbd_pp_pool, number);
160 		if (page)
161 			drbd_pp_vacant -= number;
162 		spin_unlock(&drbd_pp_lock);
163 		if (page)
164 			return page;
165 	}
166 
167 	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
168 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
169 	 * which in turn might block on the other node at this very place.  */
170 	for (i = 0; i < number; i++) {
171 		tmp = alloc_page(GFP_TRY);
172 		if (!tmp)
173 			break;
174 		set_page_private(tmp, (unsigned long)page);
175 		page = tmp;
176 	}
177 
178 	if (i == number)
179 		return page;
180 
181 	/* Not enough pages immediately available this time.
182 	 * No need to jump around here, drbd_alloc_pages will retry this
183 	 * function "soon". */
184 	if (page) {
185 		tmp = page_chain_tail(page, NULL);
186 		spin_lock(&drbd_pp_lock);
187 		page_chain_add(&drbd_pp_pool, page, tmp);
188 		drbd_pp_vacant += i;
189 		spin_unlock(&drbd_pp_lock);
190 	}
191 	return NULL;
192 }
193 
194 static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
195 					   struct list_head *to_be_freed)
196 {
197 	struct drbd_peer_request *peer_req, *tmp;
198 
199 	/* The EEs are always appended to the end of the list. Since
200 	   they are sent in order over the wire, they have to finish
201 	   in order. As soon as we see the first unfinished one, we can
202 	   stop examining the list... */
203 
204 	list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
205 		if (drbd_peer_req_has_active_page(peer_req))
206 			break;
207 		list_move(&peer_req->w.list, to_be_freed);
208 	}
209 }
210 
211 static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
212 {
213 	LIST_HEAD(reclaimed);
214 	struct drbd_peer_request *peer_req, *t;
215 
216 	spin_lock_irq(&device->resource->req_lock);
217 	reclaim_finished_net_peer_reqs(device, &reclaimed);
218 	spin_unlock_irq(&device->resource->req_lock);
219 	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
220 		drbd_free_net_peer_req(device, peer_req);
221 }
222 
223 static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
224 {
225 	struct drbd_peer_device *peer_device;
226 	int vnr;
227 
228 	rcu_read_lock();
229 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
230 		struct drbd_device *device = peer_device->device;
231 		if (!atomic_read(&device->pp_in_use_by_net))
232 			continue;
233 
234 		kref_get(&device->kref);
235 		rcu_read_unlock();
236 		drbd_reclaim_net_peer_reqs(device);
237 		kref_put(&device->kref, drbd_destroy_device);
238 		rcu_read_lock();
239 	}
240 	rcu_read_unlock();
241 }
242 
243 /**
244  * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
245  * @peer_device:	DRBD peer device.
246  * @number:		number of pages requested
247  * @retry:		whether to retry, if not enough pages are available right now
248  *
249  * Tries to allocate @number pages, first from our own page pool, then from
250  * the kernel.
251  * Possibly retry until DRBD frees sufficient pages somewhere else.
252  *
253  * If this allocation would exceed the max_buffers setting, we throttle
254  * allocation (schedule_timeout) to give the system some room to breathe.
255  *
256  * We do not use max-buffers as a hard limit, because it could lead to
257  * congestion and further to a distributed deadlock during online-verify or
258  * (checksum based) resync, if the max-buffers, socket buffer sizes and
259  * resync-rate settings are misconfigured.
260  *
261  * Returns a page chain linked via page->private.
262  */
263 struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
264 			      bool retry)
265 {
266 	struct drbd_device *device = peer_device->device;
267 	struct page *page = NULL;
268 	struct net_conf *nc;
269 	DEFINE_WAIT(wait);
270 	unsigned int mxb;
271 
272 	rcu_read_lock();
273 	nc = rcu_dereference(peer_device->connection->net_conf);
274 	mxb = nc ? nc->max_buffers : 1000000;
275 	rcu_read_unlock();
276 
277 	if (atomic_read(&device->pp_in_use) < mxb)
278 		page = __drbd_alloc_pages(device, number);
279 
280 	/* Try to keep the fast path fast, but occasionally we need
281 	 * to reclaim the pages we lent to the network stack. */
282 	if (page && atomic_read(&device->pp_in_use_by_net) > 512)
283 		drbd_reclaim_net_peer_reqs(device);
284 
285 	while (page == NULL) {
286 		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
287 
288 		drbd_reclaim_net_peer_reqs(device);
289 
290 		if (atomic_read(&device->pp_in_use) < mxb) {
291 			page = __drbd_alloc_pages(device, number);
292 			if (page)
293 				break;
294 		}
295 
296 		if (!retry)
297 			break;
298 
299 		if (signal_pending(current)) {
300 			drbd_warn(device, "drbd_alloc_pages interrupted!\n");
301 			break;
302 		}
303 
304 		if (schedule_timeout(HZ/10) == 0)
305 			mxb = UINT_MAX;
306 	}
307 	finish_wait(&drbd_pp_wait, &wait);
308 
309 	if (page)
310 		atomic_add(number, &device->pp_in_use);
311 	return page;
312 }
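/* Usage sketch (illustrative, not a call site in this file): the receive path
 * would pull one page for an incoming 4 KiB write roughly like
 *	page = drbd_alloc_pages(peer_device, 1, true);
 * and hand it back through drbd_free_pages() once the peer request is done. */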
313 
314 /* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
315  * It is also used from inside another spin_lock_irq(&resource->req_lock);
316  * Either links the page chain back to the global pool,
317  * or returns all pages to the system. */
318 static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
319 {
320 	atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
321 	int i;
322 
323 	if (page == NULL)
324 		return;
325 
326 	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
327 		i = page_chain_free(page);
328 	else {
329 		struct page *tmp;
330 		tmp = page_chain_tail(page, &i);
331 		spin_lock(&drbd_pp_lock);
332 		page_chain_add(&drbd_pp_pool, page, tmp);
333 		drbd_pp_vacant += i;
334 		spin_unlock(&drbd_pp_lock);
335 	}
336 	i = atomic_sub_return(i, a);
337 	if (i < 0)
338 		drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
339 			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
340 	wake_up(&drbd_pp_wait);
341 }
342 
343 /*
344 You need to hold the req_lock:
345  _drbd_wait_ee_list_empty()
346 
347 You must not have the req_lock:
348  drbd_free_peer_req()
349  drbd_alloc_peer_req()
350  drbd_free_peer_reqs()
351  drbd_ee_fix_bhs()
352  drbd_finish_peer_reqs()
353  drbd_clear_done_ee()
354  drbd_wait_ee_list_empty()
355 */
356 
357 /* normal: payload_size == request size (bi_size)
358  * w_same: payload_size == logical_block_size
359  * trim: payload_size == 0 */
360 struct drbd_peer_request *
361 drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
362 		    unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local)
363 {
364 	struct drbd_device *device = peer_device->device;
365 	struct drbd_peer_request *peer_req;
366 	struct page *page = NULL;
367 	unsigned nr_pages = (payload_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
368 
369 	if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
370 		return NULL;
371 
372 	peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
373 	if (!peer_req) {
374 		if (!(gfp_mask & __GFP_NOWARN))
375 			drbd_err(device, "%s: allocation failed\n", __func__);
376 		return NULL;
377 	}
378 
379 	if (nr_pages) {
380 		page = drbd_alloc_pages(peer_device, nr_pages,
381 					gfpflags_allow_blocking(gfp_mask));
382 		if (!page)
383 			goto fail;
384 	}
385 
386 	memset(peer_req, 0, sizeof(*peer_req));
387 	INIT_LIST_HEAD(&peer_req->w.list);
388 	drbd_clear_interval(&peer_req->i);
389 	peer_req->i.size = request_size;
390 	peer_req->i.sector = sector;
391 	peer_req->submit_jif = jiffies;
392 	peer_req->peer_device = peer_device;
393 	peer_req->pages = page;
394 	/*
395 	 * The block_id is opaque to the receiver.  It is not endianness
396 	 * converted, and sent back to the sender unchanged.
397 	 */
398 	peer_req->block_id = id;
399 
400 	return peer_req;
401 
402  fail:
403 	mempool_free(peer_req, &drbd_ee_mempool);
404 	return NULL;
405 }
406 
407 void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
408 		       int is_net)
409 {
410 	might_sleep();
411 	if (peer_req->flags & EE_HAS_DIGEST)
412 		kfree(peer_req->digest);
413 	drbd_free_pages(device, peer_req->pages, is_net);
414 	D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
415 	D_ASSERT(device, drbd_interval_empty(&peer_req->i));
416 	if (!expect(!(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
417 		peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
418 		drbd_al_complete_io(device, &peer_req->i);
419 	}
420 	mempool_free(peer_req, &drbd_ee_mempool);
421 }
422 
423 int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
424 {
425 	LIST_HEAD(work_list);
426 	struct drbd_peer_request *peer_req, *t;
427 	int count = 0;
428 	int is_net = list == &device->net_ee;
429 
430 	spin_lock_irq(&device->resource->req_lock);
431 	list_splice_init(list, &work_list);
432 	spin_unlock_irq(&device->resource->req_lock);
433 
434 	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
435 		__drbd_free_peer_req(device, peer_req, is_net);
436 		count++;
437 	}
438 	return count;
439 }
440 
441 /*
442  * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
443  */
444 static int drbd_finish_peer_reqs(struct drbd_device *device)
445 {
446 	LIST_HEAD(work_list);
447 	LIST_HEAD(reclaimed);
448 	struct drbd_peer_request *peer_req, *t;
449 	int err = 0;
450 
451 	spin_lock_irq(&device->resource->req_lock);
452 	reclaim_finished_net_peer_reqs(device, &reclaimed);
453 	list_splice_init(&device->done_ee, &work_list);
454 	spin_unlock_irq(&device->resource->req_lock);
455 
456 	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
457 		drbd_free_net_peer_req(device, peer_req);
458 
459 	/* possible callbacks here:
460 	 * e_end_block, and e_end_resync_block, e_send_superseded.
461 	 * all ignore the last argument.
462 	 */
463 	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
464 		int err2;
465 
466 		/* list_del not necessary, next/prev members not touched */
467 		err2 = peer_req->w.cb(&peer_req->w, !!err);
468 		if (!err)
469 			err = err2;
470 		drbd_free_peer_req(device, peer_req);
471 	}
472 	wake_up(&device->ee_wait);
473 
474 	return err;
475 }
476 
477 static void _drbd_wait_ee_list_empty(struct drbd_device *device,
478 				     struct list_head *head)
479 {
480 	DEFINE_WAIT(wait);
481 
482 	/* avoids spin_lock/unlock
483 	 * and calling prepare_to_wait in the fast path */
484 	while (!list_empty(head)) {
485 		prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
486 		spin_unlock_irq(&device->resource->req_lock);
487 		io_schedule();
488 		finish_wait(&device->ee_wait, &wait);
489 		spin_lock_irq(&device->resource->req_lock);
490 	}
491 }
492 
493 static void drbd_wait_ee_list_empty(struct drbd_device *device,
494 				    struct list_head *head)
495 {
496 	spin_lock_irq(&device->resource->req_lock);
497 	_drbd_wait_ee_list_empty(device, head);
498 	spin_unlock_irq(&device->resource->req_lock);
499 }
500 
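/* Thin wrapper around sock_recvmsg(): returns the number of bytes received,
 * 0 on orderly shutdown, or a negative errno.  With the default MSG_WAITALL
 * it blocks until the full @size has arrived (or an error or signal
 * interrupts the wait). */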
501 static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
502 {
503 	struct kvec iov = {
504 		.iov_base = buf,
505 		.iov_len = size,
506 	};
507 	struct msghdr msg = {
508 		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
509 	};
510 	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size);
511 	return sock_recvmsg(sock, &msg, msg.msg_flags);
512 }
513 
514 static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
515 {
516 	int rv;
517 
518 	rv = drbd_recv_short(connection->data.socket, buf, size, 0);
519 
520 	if (rv < 0) {
521 		if (rv == -ECONNRESET)
522 			drbd_info(connection, "sock was reset by peer\n");
523 		else if (rv != -ERESTARTSYS)
524 			drbd_err(connection, "sock_recvmsg returned %d\n", rv);
525 	} else if (rv == 0) {
526 		if (test_bit(DISCONNECT_SENT, &connection->flags)) {
527 			long t;
528 			rcu_read_lock();
529 			t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
530 			rcu_read_unlock();
531 
532 			t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);
533 
534 			if (t)
535 				goto out;
536 		}
537 		drbd_info(connection, "sock was shut down by peer\n");
538 	}
539 
540 	if (rv != size)
541 		conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
542 
543 out:
544 	return rv;
545 }
546 
547 static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
548 {
549 	int err;
550 
551 	err = drbd_recv(connection, buf, size);
552 	if (err != size) {
553 		if (err >= 0)
554 			err = -EIO;
555 	} else
556 		err = 0;
557 	return err;
558 }
559 
560 static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
561 {
562 	int err;
563 
564 	err = drbd_recv_all(connection, buf, size);
565 	if (err && !signal_pending(current))
566 		drbd_warn(connection, "short read (expected size %d)\n", (int)size);
567 	return err;
568 }
569 
570 /* quoting tcp(7):
571  *   On individual connections, the socket buffer size must be set prior to the
572  *   listen(2) or connect(2) calls in order to have it take effect.
573  * This is our wrapper to do so.
574  */
575 static void drbd_setbufsize(struct socket *sock, unsigned int snd,
576 		unsigned int rcv)
577 {
578 	/* open coded SO_SNDBUF, SO_RCVBUF */
579 	if (snd) {
580 		sock->sk->sk_sndbuf = snd;
581 		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
582 	}
583 	if (rcv) {
584 		sock->sk->sk_rcvbuf = rcv;
585 		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
586 	}
587 }
588 
589 static struct socket *drbd_try_connect(struct drbd_connection *connection)
590 {
591 	const char *what;
592 	struct socket *sock;
593 	struct sockaddr_in6 src_in6;
594 	struct sockaddr_in6 peer_in6;
595 	struct net_conf *nc;
596 	int err, peer_addr_len, my_addr_len;
597 	int sndbuf_size, rcvbuf_size, connect_int;
598 	int disconnect_on_error = 1;
599 
600 	rcu_read_lock();
601 	nc = rcu_dereference(connection->net_conf);
602 	if (!nc) {
603 		rcu_read_unlock();
604 		return NULL;
605 	}
606 	sndbuf_size = nc->sndbuf_size;
607 	rcvbuf_size = nc->rcvbuf_size;
608 	connect_int = nc->connect_int;
609 	rcu_read_unlock();
610 
611 	my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6));
612 	memcpy(&src_in6, &connection->my_addr, my_addr_len);
613 
614 	if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6)
615 		src_in6.sin6_port = 0;
616 	else
617 		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
618 
619 	peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6));
620 	memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);
621 
622 	what = "sock_create_kern";
623 	err = sock_create_kern(&init_net, ((struct sockaddr *)&src_in6)->sa_family,
624 			       SOCK_STREAM, IPPROTO_TCP, &sock);
625 	if (err < 0) {
626 		sock = NULL;
627 		goto out;
628 	}
629 
630 	sock->sk->sk_rcvtimeo =
631 	sock->sk->sk_sndtimeo = connect_int * HZ;
632 	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
633 
634 	/* Explicitly bind to the configured IP as source IP
635 	 * for the outgoing connections.
636 	 * This is needed for multihomed hosts and to be
637 	 * able to use lo: interfaces for drbd.
638 	 * Make sure to use 0 as port number, so Linux selects
639 	 * a free one dynamically.
640 	 */
641 	what = "bind before connect";
642 	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
643 	if (err < 0)
644 		goto out;
645 
646 	/* connect may fail, peer not yet available.
647 	 * stay C_WF_CONNECTION, don't go Disconnecting! */
648 	disconnect_on_error = 0;
649 	what = "connect";
650 	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
651 
652 out:
653 	if (err < 0) {
654 		if (sock) {
655 			sock_release(sock);
656 			sock = NULL;
657 		}
658 		switch (-err) {
659 			/* timeout, busy, signal pending */
660 		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
661 		case EINTR: case ERESTARTSYS:
662 			/* peer not (yet) available, network problem */
663 		case ECONNREFUSED: case ENETUNREACH:
664 		case EHOSTDOWN:    case EHOSTUNREACH:
665 			disconnect_on_error = 0;
666 			break;
667 		default:
668 			drbd_err(connection, "%s failed, err = %d\n", what, err);
669 		}
670 		if (disconnect_on_error)
671 			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
672 	}
673 
674 	return sock;
675 }
676 
677 struct accept_wait_data {
678 	struct drbd_connection *connection;
679 	struct socket *s_listen;
680 	struct completion door_bell;
681 	void (*original_sk_state_change)(struct sock *sk);
682 
683 };
684 
685 static void drbd_incoming_connection(struct sock *sk)
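/* sk_state_change callback, installed on the listen socket by
 * prepare_listen_socket(): ring the door_bell completion as soon as an
 * incoming connection reaches TCP_ESTABLISHED, then chain to the
 * original callback. */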
686 {
687 	struct accept_wait_data *ad = sk->sk_user_data;
688 	void (*state_change)(struct sock *sk);
689 
690 	state_change = ad->original_sk_state_change;
691 	if (sk->sk_state == TCP_ESTABLISHED)
692 		complete(&ad->door_bell);
693 	state_change(sk);
694 }
695 
696 static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
697 {
698 	int err, sndbuf_size, rcvbuf_size, my_addr_len;
699 	struct sockaddr_in6 my_addr;
700 	struct socket *s_listen;
701 	struct net_conf *nc;
702 	const char *what;
703 
704 	rcu_read_lock();
705 	nc = rcu_dereference(connection->net_conf);
706 	if (!nc) {
707 		rcu_read_unlock();
708 		return -EIO;
709 	}
710 	sndbuf_size = nc->sndbuf_size;
711 	rcvbuf_size = nc->rcvbuf_size;
712 	rcu_read_unlock();
713 
714 	my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6));
715 	memcpy(&my_addr, &connection->my_addr, my_addr_len);
716 
717 	what = "sock_create_kern";
718 	err = sock_create_kern(&init_net, ((struct sockaddr *)&my_addr)->sa_family,
719 			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
720 	if (err) {
721 		s_listen = NULL;
722 		goto out;
723 	}
724 
725 	s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
726 	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
727 
728 	what = "bind before listen";
729 	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
730 	if (err < 0)
731 		goto out;
732 
733 	ad->s_listen = s_listen;
734 	write_lock_bh(&s_listen->sk->sk_callback_lock);
735 	ad->original_sk_state_change = s_listen->sk->sk_state_change;
736 	s_listen->sk->sk_state_change = drbd_incoming_connection;
737 	s_listen->sk->sk_user_data = ad;
738 	write_unlock_bh(&s_listen->sk->sk_callback_lock);
739 
740 	what = "listen";
741 	err = s_listen->ops->listen(s_listen, 5);
742 	if (err < 0)
743 		goto out;
744 
745 	return 0;
746 out:
747 	if (s_listen)
748 		sock_release(s_listen);
749 	if (err < 0) {
750 		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
751 			drbd_err(connection, "%s failed, err = %d\n", what, err);
752 			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
753 		}
754 	}
755 
756 	return -EIO;
757 }
758 
759 static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
760 {
761 	write_lock_bh(&sk->sk_callback_lock);
762 	sk->sk_state_change = ad->original_sk_state_change;
763 	sk->sk_user_data = NULL;
764 	write_unlock_bh(&sk->sk_callback_lock);
765 }
766 
767 static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
768 {
769 	int timeo, connect_int, err = 0;
770 	struct socket *s_estab = NULL;
771 	struct net_conf *nc;
772 
773 	rcu_read_lock();
774 	nc = rcu_dereference(connection->net_conf);
775 	if (!nc) {
776 		rcu_read_unlock();
777 		return NULL;
778 	}
779 	connect_int = nc->connect_int;
780 	rcu_read_unlock();
781 
782 	timeo = connect_int * HZ;
783 	/* 28.5% random jitter */
784 	/* 28.5% random jitter (+/- timeo/7) */
785 
786 	err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
787 	if (err <= 0)
788 		return NULL;
789 
790 	err = kernel_accept(ad->s_listen, &s_estab, 0);
791 	if (err < 0) {
792 		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
793 			drbd_err(connection, "accept failed, err = %d\n", err);
794 			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
795 		}
796 	}
797 
798 	if (s_estab)
799 		unregister_state_change(s_estab->sk, ad);
800 
801 	return s_estab;
802 }
803 
804 static int decode_header(struct drbd_connection *, void *, struct packet_info *);
805 
806 static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
807 			     enum drbd_packet cmd)
808 {
809 	if (!conn_prepare_command(connection, sock))
810 		return -EIO;
811 	return conn_send_command(connection, sock, cmd, 0, NULL, 0);
812 }
813 
814 static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
815 {
816 	unsigned int header_size = drbd_header_size(connection);
817 	struct packet_info pi;
818 	struct net_conf *nc;
819 	int err;
820 
821 	rcu_read_lock();
822 	nc = rcu_dereference(connection->net_conf);
823 	if (!nc) {
824 		rcu_read_unlock();
825 		return -EIO;
826 	}
827 	sock->sk->sk_rcvtimeo = nc->ping_timeo * 4 * HZ / 10;
828 	rcu_read_unlock();
829 
830 	err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0);
831 	if (err != header_size) {
832 		if (err >= 0)
833 			err = -EIO;
834 		return err;
835 	}
836 	err = decode_header(connection, connection->data.rbuf, &pi);
837 	if (err)
838 		return err;
839 	return pi.cmd;
840 }
841 
842 /**
843  * drbd_socket_okay() - Free the socket if its connection is not okay
844  * @sock:	pointer to the pointer to the socket.
845  */
846 static bool drbd_socket_okay(struct socket **sock)
847 {
848 	int rr;
849 	char tb[4];
850 
851 	if (!*sock)
852 		return false;
853 
854 	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
855 
856 	if (rr > 0 || rr == -EAGAIN) {
857 		return true;
858 	} else {
859 		sock_release(*sock);
860 		*sock = NULL;
861 		return false;
862 	}
863 }
864 
865 static bool connection_established(struct drbd_connection *connection,
866 				   struct socket **sock1,
867 				   struct socket **sock2)
868 {
869 	struct net_conf *nc;
870 	int timeout;
871 	bool ok;
872 
873 	if (!*sock1 || !*sock2)
874 		return false;
875 
876 	rcu_read_lock();
877 	nc = rcu_dereference(connection->net_conf);
878 	timeout = (nc->sock_check_timeo ?: nc->ping_timeo) * HZ / 10;
879 	rcu_read_unlock();
880 	schedule_timeout_interruptible(timeout);
881 
882 	ok = drbd_socket_okay(sock1);
883 	ok = drbd_socket_okay(sock2) && ok;
884 
885 	return ok;
886 }
887 
888 /* Gets called if a connection is established, or if a new minor gets created
889    in a connection */
890 int drbd_connected(struct drbd_peer_device *peer_device)
891 {
892 	struct drbd_device *device = peer_device->device;
893 	int err;
894 
895 	atomic_set(&device->packet_seq, 0);
896 	device->peer_seq = 0;
897 
898 	device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
899 		&peer_device->connection->cstate_mutex :
900 		&device->own_state_mutex;
901 
902 	err = drbd_send_sync_param(peer_device);
903 	if (!err)
904 		err = drbd_send_sizes(peer_device, 0, 0);
905 	if (!err)
906 		err = drbd_send_uuids(peer_device);
907 	if (!err)
908 		err = drbd_send_current_state(peer_device);
909 	clear_bit(USE_DEGR_WFC_T, &device->flags);
910 	clear_bit(RESIZE_PENDING, &device->flags);
911 	atomic_set(&device->ap_in_flight, 0);
912 	mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
913 	return err;
914 }
915 
916 /*
917  * return values:
918  *   1 yes, we have a valid connection
919  *   0 oops, did not work out, please try again
920  *  -1 peer talks different language,
921  *     no point in trying again, please go standalone.
922  *  -2 We do not have a network config...
923  */
924 static int conn_connect(struct drbd_connection *connection)
925 {
926 	struct drbd_socket sock, msock;
927 	struct drbd_peer_device *peer_device;
928 	struct net_conf *nc;
929 	int vnr, timeout, h;
930 	bool discard_my_data, ok;
931 	enum drbd_state_rv rv;
932 	struct accept_wait_data ad = {
933 		.connection = connection,
934 		.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
935 	};
936 
937 	clear_bit(DISCONNECT_SENT, &connection->flags);
938 	if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
939 		return -2;
940 
941 	mutex_init(&sock.mutex);
942 	sock.sbuf = connection->data.sbuf;
943 	sock.rbuf = connection->data.rbuf;
944 	sock.socket = NULL;
945 	mutex_init(&msock.mutex);
946 	msock.sbuf = connection->meta.sbuf;
947 	msock.rbuf = connection->meta.rbuf;
948 	msock.socket = NULL;
949 
950 	/* Assume that the peer only understands protocol 80 until we know better.  */
951 	connection->agreed_pro_version = 80;
952 
953 	if (prepare_listen_socket(connection, &ad))
954 		return 0;
955 
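	/* DRBD uses two TCP connections per peer, one for data and one for
	 * meta/ack traffic.  Both sides actively connect and listen at the
	 * same time; if the initial packets cross, one socket is dropped and,
	 * depending on a coin flip below, the whole attempt is retried. */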
956 	do {
957 		struct socket *s;
958 
959 		s = drbd_try_connect(connection);
960 		if (s) {
961 			if (!sock.socket) {
962 				sock.socket = s;
963 				send_first_packet(connection, &sock, P_INITIAL_DATA);
964 			} else if (!msock.socket) {
965 				clear_bit(RESOLVE_CONFLICTS, &connection->flags);
966 				msock.socket = s;
967 				send_first_packet(connection, &msock, P_INITIAL_META);
968 			} else {
969 				drbd_err(connection, "Logic error in conn_connect()\n");
970 				goto out_release_sockets;
971 			}
972 		}
973 
974 		if (connection_established(connection, &sock.socket, &msock.socket))
975 			break;
976 
977 retry:
978 		s = drbd_wait_for_connect(connection, &ad);
979 		if (s) {
980 			int fp = receive_first_packet(connection, s);
981 			drbd_socket_okay(&sock.socket);
982 			drbd_socket_okay(&msock.socket);
983 			switch (fp) {
984 			case P_INITIAL_DATA:
985 				if (sock.socket) {
986 					drbd_warn(connection, "initial packet S crossed\n");
987 					sock_release(sock.socket);
988 					sock.socket = s;
989 					goto randomize;
990 				}
991 				sock.socket = s;
992 				break;
993 			case P_INITIAL_META:
994 				set_bit(RESOLVE_CONFLICTS, &connection->flags);
995 				if (msock.socket) {
996 					drbd_warn(connection, "initial packet M crossed\n");
997 					sock_release(msock.socket);
998 					msock.socket = s;
999 					goto randomize;
1000 				}
1001 				msock.socket = s;
1002 				break;
1003 			default:
1004 				drbd_warn(connection, "Error receiving initial packet\n");
1005 				sock_release(s);
1006 randomize:
1007 				if (prandom_u32() & 1)
1008 					goto retry;
1009 			}
1010 		}
1011 
1012 		if (connection->cstate <= C_DISCONNECTING)
1013 			goto out_release_sockets;
1014 		if (signal_pending(current)) {
1015 			flush_signals(current);
1016 			smp_rmb();
1017 			if (get_t_state(&connection->receiver) == EXITING)
1018 				goto out_release_sockets;
1019 		}
1020 
1021 		ok = connection_established(connection, &sock.socket, &msock.socket);
1022 	} while (!ok);
1023 
1024 	if (ad.s_listen)
1025 		sock_release(ad.s_listen);
1026 
1027 	sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
1028 	msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
1029 
1030 	sock.socket->sk->sk_allocation = GFP_NOIO;
1031 	msock.socket->sk->sk_allocation = GFP_NOIO;
1032 
1033 	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
1034 	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
1035 
1036 	/* NOT YET ...
1037 	 * sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10;
1038 	 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
1039 	 * first set it to the P_CONNECTION_FEATURES timeout,
1040 	 * which we set to 4x the configured ping_timeout. */
1041 	rcu_read_lock();
1042 	nc = rcu_dereference(connection->net_conf);
1043 
1044 	sock.socket->sk->sk_sndtimeo =
1045 	sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
1046 
1047 	msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
1048 	timeout = nc->timeout * HZ / 10;
1049 	discard_my_data = nc->discard_my_data;
1050 	rcu_read_unlock();
1051 
1052 	msock.socket->sk->sk_sndtimeo = timeout;
1053 
1054 	/* we don't want delays.
1055 	 * we use TCP_CORK where appropriate, though */
1056 	tcp_sock_set_nodelay(sock.socket->sk);
1057 	tcp_sock_set_nodelay(msock.socket->sk);
1058 
1059 	connection->data.socket = sock.socket;
1060 	connection->meta.socket = msock.socket;
1061 	connection->last_received = jiffies;
1062 
1063 	h = drbd_do_features(connection);
1064 	if (h <= 0)
1065 		return h;
1066 
1067 	if (connection->cram_hmac_tfm) {
1068 		/* drbd_request_state(device, NS(conn, WFAuth)); */
1069 		switch (drbd_do_auth(connection)) {
1070 		case -1:
1071 			drbd_err(connection, "Authentication of peer failed\n");
1072 			return -1;
1073 		case 0:
1074 			drbd_err(connection, "Authentication of peer failed, trying again.\n");
1075 			return 0;
1076 		}
1077 	}
1078 
1079 	connection->data.socket->sk->sk_sndtimeo = timeout;
1080 	connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
1081 
1082 	if (drbd_send_protocol(connection) == -EOPNOTSUPP)
1083 		return -1;
1084 
1085 	/* Prevent a race between resync-handshake and
1086 	 * being promoted to Primary.
1087 	 *
1088 	 * Grab and release the state mutex, so we know that any current
1089 	 * drbd_set_role() is finished, and any incoming drbd_set_role
1090 	 * will see the STATE_SENT flag, and wait for it to be cleared.
1091 	 */
1092 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
1093 		mutex_lock(peer_device->device->state_mutex);
1094 
1095 	/* avoid a race with conn_request_state( C_DISCONNECTING ) */
1096 	spin_lock_irq(&connection->resource->req_lock);
1097 	set_bit(STATE_SENT, &connection->flags);
1098 	spin_unlock_irq(&connection->resource->req_lock);
1099 
1100 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
1101 		mutex_unlock(peer_device->device->state_mutex);
1102 
1103 	rcu_read_lock();
1104 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1105 		struct drbd_device *device = peer_device->device;
1106 		kref_get(&device->kref);
1107 		rcu_read_unlock();
1108 
1109 		if (discard_my_data)
1110 			set_bit(DISCARD_MY_DATA, &device->flags);
1111 		else
1112 			clear_bit(DISCARD_MY_DATA, &device->flags);
1113 
1114 		drbd_connected(peer_device);
1115 		kref_put(&device->kref, drbd_destroy_device);
1116 		rcu_read_lock();
1117 	}
1118 	rcu_read_unlock();
1119 
1120 	rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
1121 	if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) {
1122 		clear_bit(STATE_SENT, &connection->flags);
1123 		return 0;
1124 	}
1125 
1126 	drbd_thread_start(&connection->ack_receiver);
1127 	/* opencoded create_singlethread_workqueue(),
1128 	 * to be able to use format string arguments */
1129 	connection->ack_sender =
1130 		alloc_ordered_workqueue("drbd_as_%s", WQ_MEM_RECLAIM, connection->resource->name);
1131 	if (!connection->ack_sender) {
1132 		drbd_err(connection, "Failed to create workqueue ack_sender\n");
1133 		return 0;
1134 	}
1135 
1136 	mutex_lock(&connection->resource->conf_update);
1137 	/* The discard_my_data flag is a single-shot modifier to the next
1138 	 * connection attempt, the handshake of which is now well underway.
1139 	 * No need for rcu style copying of the whole struct
1140 	 * just to clear a single value. */
1141 	connection->net_conf->discard_my_data = 0;
1142 	mutex_unlock(&connection->resource->conf_update);
1143 
1144 	return h;
1145 
1146 out_release_sockets:
1147 	if (ad.s_listen)
1148 		sock_release(ad.s_listen);
1149 	if (sock.socket)
1150 		sock_release(sock.socket);
1151 	if (msock.socket)
1152 		sock_release(msock.socket);
1153 	return -1;
1154 }
1155 
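/* Three on-the-wire header formats exist, selected by the agreed protocol
 * version via drbd_header_size(): struct p_header100 (magic DRBD_MAGIC_100,
 * carries a volume number), struct p_header95 (DRBD_MAGIC_BIG, 32 bit
 * length), and the legacy struct p_header80 (DRBD_MAGIC, 16 bit length). */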
1156 static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
1157 {
1158 	unsigned int header_size = drbd_header_size(connection);
1159 
1160 	if (header_size == sizeof(struct p_header100) &&
1161 	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
1162 		struct p_header100 *h = header;
1163 		if (h->pad != 0) {
1164 			drbd_err(connection, "Header padding is not zero\n");
1165 			return -EINVAL;
1166 		}
1167 		pi->vnr = be16_to_cpu(h->volume);
1168 		pi->cmd = be16_to_cpu(h->command);
1169 		pi->size = be32_to_cpu(h->length);
1170 	} else if (header_size == sizeof(struct p_header95) &&
1171 		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
1172 		struct p_header95 *h = header;
1173 		pi->cmd = be16_to_cpu(h->command);
1174 		pi->size = be32_to_cpu(h->length);
1175 		pi->vnr = 0;
1176 	} else if (header_size == sizeof(struct p_header80) &&
1177 		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
1178 		struct p_header80 *h = header;
1179 		pi->cmd = be16_to_cpu(h->command);
1180 		pi->size = be16_to_cpu(h->length);
1181 		pi->vnr = 0;
1182 	} else {
1183 		drbd_err(connection, "Wrong magic value 0x%08x in protocol version %d\n",
1184 			 be32_to_cpu(*(__be32 *)header),
1185 			 connection->agreed_pro_version);
1186 		return -EINVAL;
1187 	}
1188 	pi->data = header + header_size;
1189 	return 0;
1190 }
1191 
1192 static void drbd_unplug_all_devices(struct drbd_connection *connection)
1193 {
1194 	if (current->plug == &connection->receiver_plug) {
1195 		blk_finish_plug(&connection->receiver_plug);
1196 		blk_start_plug(&connection->receiver_plug);
1197 	} /* else: maybe just schedule() ?? */
1198 }
1199 
1200 static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
1201 {
1202 	void *buffer = connection->data.rbuf;
1203 	int err;
1204 
1205 	err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection));
1206 	if (err)
1207 		return err;
1208 
1209 	err = decode_header(connection, buffer, pi);
1210 	connection->last_received = jiffies;
1211 
1212 	return err;
1213 }
1214 
1215 static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi)
1216 {
1217 	void *buffer = connection->data.rbuf;
1218 	unsigned int size = drbd_header_size(connection);
1219 	int err;
1220 
1221 	err = drbd_recv_short(connection->data.socket, buffer, size, MSG_NOSIGNAL|MSG_DONTWAIT);
1222 	if (err != size) {
1223 		/* If we have nothing in the receive buffer now, to reduce
1224 		 * application latency, try to drain the backend queues as
1225 		 * quickly as possible, and let remote TCP know what we have
1226 		 * received so far. */
1227 		if (err == -EAGAIN) {
1228 			tcp_sock_set_quickack(connection->data.socket->sk, 2);
1229 			drbd_unplug_all_devices(connection);
1230 		}
1231 		if (err > 0) {
1232 			buffer += err;
1233 			size -= err;
1234 		}
1235 		err = drbd_recv_all_warn(connection, buffer, size);
1236 		if (err)
1237 			return err;
1238 	}
1239 
1240 	err = decode_header(connection, connection->data.rbuf, pi);
1241 	connection->last_received = jiffies;
1242 
1243 	return err;
1244 }
1245 /* This is blkdev_issue_flush, but asynchronous.
1246  * We want to submit to all component volumes in parallel,
1247  * then wait for all completions.
1248  */
1249 struct issue_flush_context {
1250 	atomic_t pending;
1251 	int error;
1252 	struct completion done;
1253 };
1254 struct one_flush_context {
1255 	struct drbd_device *device;
1256 	struct issue_flush_context *ctx;
1257 };
1258 
1259 static void one_flush_endio(struct bio *bio)
1260 {
1261 	struct one_flush_context *octx = bio->bi_private;
1262 	struct drbd_device *device = octx->device;
1263 	struct issue_flush_context *ctx = octx->ctx;
1264 
1265 	if (bio->bi_status) {
1266 		ctx->error = blk_status_to_errno(bio->bi_status);
1267 		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
1268 	}
1269 	kfree(octx);
1270 	bio_put(bio);
1271 
1272 	clear_bit(FLUSH_PENDING, &device->flags);
1273 	put_ldev(device);
1274 	kref_put(&device->kref, drbd_destroy_device);
1275 
1276 	if (atomic_dec_and_test(&ctx->pending))
1277 		complete(&ctx->done);
1278 }
1279 
1280 static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
1281 {
1282 	struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
1283 				    REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO);
1284 	struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
1285 
1286 	if (!octx) {
1287 		drbd_warn(device, "Could not allocate an octx, CANNOT ISSUE FLUSH\n");
1288 		/* FIXME: what else can I do now?  disconnecting or detaching
1289 		 * really does not help to improve the state of the world, either.
1290 		 */
1291 		bio_put(bio);
1292 
1293 		ctx->error = -ENOMEM;
1294 		put_ldev(device);
1295 		kref_put(&device->kref, drbd_destroy_device);
1296 		return;
1297 	}
1298 
1299 	octx->device = device;
1300 	octx->ctx = ctx;
1301 	bio->bi_private = octx;
1302 	bio->bi_end_io = one_flush_endio;
1303 
1304 	device->flush_jif = jiffies;
1305 	set_bit(FLUSH_PENDING, &device->flags);
1306 	atomic_inc(&ctx->pending);
1307 	submit_bio(bio);
1308 }
1309 
1310 static void drbd_flush(struct drbd_connection *connection)
1311 {
1312 	if (connection->resource->write_ordering >= WO_BDEV_FLUSH) {
1313 		struct drbd_peer_device *peer_device;
1314 		struct issue_flush_context ctx;
1315 		int vnr;
1316 
1317 		atomic_set(&ctx.pending, 1);
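		/* Start with a bias of one so ctx.pending cannot drop to zero
		 * while flushes are still being submitted; the bias is dropped
		 * again by the atomic_dec_and_test() below. */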
1318 		ctx.error = 0;
1319 		init_completion(&ctx.done);
1320 
1321 		rcu_read_lock();
1322 		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1323 			struct drbd_device *device = peer_device->device;
1324 
1325 			if (!get_ldev(device))
1326 				continue;
1327 			kref_get(&device->kref);
1328 			rcu_read_unlock();
1329 
1330 			submit_one_flush(device, &ctx);
1331 
1332 			rcu_read_lock();
1333 		}
1334 		rcu_read_unlock();
1335 
1336 		/* Do we want to add a timeout,
1337 		 * if disk-timeout is set? */
1338 		if (!atomic_dec_and_test(&ctx.pending))
1339 			wait_for_completion(&ctx.done);
1340 
1341 		if (ctx.error) {
1342 			/* would rather check on EOPNOTSUPP, but that is not reliable.
1343 			 * don't try again for ANY return value != 0
1344 			 * if (rv == -EOPNOTSUPP) */
1345 			/* Any error is already reported by bio_endio callback. */
1346 			drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
1347 		}
1348 	}
1349 }
1350 
1351 /**
1352  * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishing it
1353  * @connection:	DRBD connection.
1354  * @epoch:	Epoch object.
1355  * @ev:		Epoch event.
1356  */
1357 static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
1358 					       struct drbd_epoch *epoch,
1359 					       enum epoch_event ev)
1360 {
1361 	int epoch_size;
1362 	struct drbd_epoch *next_epoch;
1363 	enum finish_epoch rv = FE_STILL_LIVE;
1364 
1365 	spin_lock(&connection->epoch_lock);
1366 	do {
1367 		next_epoch = NULL;
1368 
1369 		epoch_size = atomic_read(&epoch->epoch_size);
1370 
1371 		switch (ev & ~EV_CLEANUP) {
1372 		case EV_PUT:
1373 			atomic_dec(&epoch->active);
1374 			break;
1375 		case EV_GOT_BARRIER_NR:
1376 			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1377 			break;
1378 		case EV_BECAME_LAST:
1379 			/* nothing to do*/
1380 			/* nothing to do */
1381 		}
1382 
1383 		if (epoch_size != 0 &&
1384 		    atomic_read(&epoch->active) == 0 &&
1385 		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
1386 			if (!(ev & EV_CLEANUP)) {
1387 				spin_unlock(&connection->epoch_lock);
1388 				drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
1389 				spin_lock(&connection->epoch_lock);
1390 			}
1391 #if 0
1392 			/* FIXME: dec unacked on connection, once we have
1393 			 * something to count pending connection packets in. */
1394 			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
1395 				dec_unacked(epoch->connection);
1396 #endif
1397 
1398 			if (connection->current_epoch != epoch) {
1399 				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1400 				list_del(&epoch->list);
1401 				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1402 				connection->epochs--;
1403 				kfree(epoch);
1404 
1405 				if (rv == FE_STILL_LIVE)
1406 					rv = FE_DESTROYED;
1407 			} else {
1408 				epoch->flags = 0;
1409 				atomic_set(&epoch->epoch_size, 0);
1410 				/* atomic_set(&epoch->active, 0); is already zero */
1411 				if (rv == FE_STILL_LIVE)
1412 					rv = FE_RECYCLED;
1413 			}
1414 		}
1415 
1416 		if (!next_epoch)
1417 			break;
1418 
1419 		epoch = next_epoch;
1420 	} while (1);
1421 
1422 	spin_unlock(&connection->epoch_lock);
1423 
1424 	return rv;
1425 }
1426 
1427 static enum write_ordering_e
1428 max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo)
1429 {
1430 	struct disk_conf *dc;
1431 
1432 	dc = rcu_dereference(bdev->disk_conf);
1433 
1434 	if (wo == WO_BDEV_FLUSH && !dc->disk_flushes)
1435 		wo = WO_DRAIN_IO;
1436 	if (wo == WO_DRAIN_IO && !dc->disk_drain)
1437 		wo = WO_NONE;
1438 
1439 	return wo;
1440 }
1441 
1442 /*
1443  * drbd_bump_write_ordering() - Fall back to another write ordering method
1444  * @wo:		Write ordering method to try.
1445  */
1446 void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1447 			      enum write_ordering_e wo)
1448 {
1449 	struct drbd_device *device;
1450 	enum write_ordering_e pwo;
1451 	int vnr;
1452 	static char *write_ordering_str[] = {
1453 		[WO_NONE] = "none",
1454 		[WO_DRAIN_IO] = "drain",
1455 		[WO_BDEV_FLUSH] = "flush",
1456 	};
1457 
1458 	pwo = resource->write_ordering;
1459 	if (wo != WO_BDEV_FLUSH)
1460 		wo = min(pwo, wo);
1461 	rcu_read_lock();
1462 	idr_for_each_entry(&resource->devices, device, vnr) {
1463 		if (get_ldev(device)) {
1464 			wo = max_allowed_wo(device->ldev, wo);
1465 			if (device->ldev == bdev)
1466 				bdev = NULL;
1467 			put_ldev(device);
1468 		}
1469 	}
1470 
1471 	if (bdev)
1472 		wo = max_allowed_wo(bdev, wo);
1473 
1474 	rcu_read_unlock();
1475 
1476 	resource->write_ordering = wo;
1477 	if (pwo != resource->write_ordering || wo == WO_BDEV_FLUSH)
1478 		drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]);
1479 }
1480 
1481 /*
1482  * Mapping "discard" to ZEROOUT with UNMAP does not work for us:
1483  * Drivers have to "announce" q->limits.max_write_zeroes_sectors, or it
1484  * will directly go to fallback mode, submitting normal writes, and
1485  * never even try to UNMAP.
1486  *
1487  * And dm-thin does not do this (yet), mostly because in general it has
1488  * to assume that "skip_block_zeroing" is set.  See also:
1489  * https://www.mail-archive.com/dm-devel%40redhat.com/msg07965.html
1490  * https://www.redhat.com/archives/dm-devel/2018-January/msg00271.html
1491  *
1492  * We *may* ignore the discard-zeroes-data setting, if so configured.
1493  *
1494  * Assumption is that this "discard_zeroes_data=0" is only because the backend
1495  * may ignore partial unaligned discards.
1496  *
1497  * LVM/DM thin as of at least
1498  *   LVM version:     2.02.115(2)-RHEL7 (2015-01-28)
1499  *   Library version: 1.02.93-RHEL7 (2015-01-28)
1500  *   Driver version:  4.29.0
1501  * still behaves this way.
1502  *
1503  * For unaligned (wrt. alignment and granularity) or too small discards,
1504  * we zero-out the initial (and/or) trailing unaligned partial chunks,
1505  * but discard all the aligned full chunks.
1506  *
1507  * At least for LVM/DM thin, with skip_block_zeroing=false,
1508  * the result is effectively "discard_zeroes_data=1".
1509  */
1510 /* flags: EE_TRIM|EE_ZEROOUT */
1511 int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags)
1512 {
1513 	struct block_device *bdev = device->ldev->backing_bdev;
1514 	struct request_queue *q = bdev_get_queue(bdev);
1515 	sector_t tmp, nr;
1516 	unsigned int max_discard_sectors, granularity;
1517 	int alignment;
1518 	int err = 0;
1519 
1520 	if ((flags & EE_ZEROOUT) || !(flags & EE_TRIM))
1521 		goto zero_out;
1522 
1523 	/* Zero-sector (unknown) and one-sector granularities are the same.  */
1524 	granularity = max(q->limits.discard_granularity >> 9, 1U);
1525 	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
1526 
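	/* Cap individual discards at 1 << 22 sectors (2 GiB) and round down
	 * to a multiple of the discard granularity. */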
1527 	max_discard_sectors = min(bdev_max_discard_sectors(bdev), (1U << 22));
1528 	max_discard_sectors -= max_discard_sectors % granularity;
1529 	if (unlikely(!max_discard_sectors))
1530 		goto zero_out;
1531 
1532 	if (nr_sectors < granularity)
1533 		goto zero_out;
1534 
1535 	tmp = start;
1536 	if (sector_div(tmp, granularity) != alignment) {
1537 		if (nr_sectors < 2*granularity)
1538 			goto zero_out;
1539 		/* start + gran - (start + gran - align) % gran */
1540 		tmp = start + granularity - alignment;
1541 		tmp = start + granularity - sector_div(tmp, granularity);
1542 
1543 		nr = tmp - start;
1544 		/* don't flag BLKDEV_ZERO_NOUNMAP, we don't know how many
1545 		 * layers are below us, some may have smaller granularity */
1546 		err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO, 0);
1547 		nr_sectors -= nr;
1548 		start = tmp;
1549 	}
1550 	while (nr_sectors >= max_discard_sectors) {
1551 		err |= blkdev_issue_discard(bdev, start, max_discard_sectors, GFP_NOIO, 0);
1552 		nr_sectors -= max_discard_sectors;
1553 		start += max_discard_sectors;
1554 	}
1555 	if (nr_sectors) {
1556 		/* max_discard_sectors is unsigned int (and a multiple of
1557 		 * granularity, we made sure of that above already);
1558 		 * nr is < max_discard_sectors;
1559 		 * I don't need sector_div here, even though nr is sector_t */
1560 		nr = nr_sectors;
1561 		nr -= (unsigned int)nr % granularity;
1562 		if (nr) {
1563 			err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 0);
1564 			nr_sectors -= nr;
1565 			start += nr;
1566 		}
1567 	}
1568  zero_out:
1569 	if (nr_sectors) {
1570 		err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO,
1571 				(flags & EE_TRIM) ? 0 : BLKDEV_ZERO_NOUNMAP);
1572 	}
1573 	return err != 0;
1574 }
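/* Worked example with hypothetical numbers, assuming the backend allows
 * large discards: granularity = 8 sectors, alignment = 0, start = 13,
 * nr_sectors = 30.  The code above zeroes out the unaligned head 13..15,
 * discards the aligned chunks 16..39, and zeroes out the tail 40..42. */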
1575 
1576 static bool can_do_reliable_discards(struct drbd_device *device)
1577 {
1578 	struct disk_conf *dc;
1579 	bool can_do;
1580 
1581 	if (!bdev_max_discard_sectors(device->ldev->backing_bdev))
1582 		return false;
1583 
1584 	rcu_read_lock();
1585 	dc = rcu_dereference(device->ldev->disk_conf);
1586 	can_do = dc->discard_zeroes_if_aligned;
1587 	rcu_read_unlock();
1588 	return can_do;
1589 }
1590 
1591 static void drbd_issue_peer_discard_or_zero_out(struct drbd_device *device, struct drbd_peer_request *peer_req)
1592 {
1593 	/* If the backend cannot discard, or does not guarantee
1594 	 * read-back zeroes in discarded ranges, we fall back to
1595 	 * zero-out.  Unless configuration specifically requested
1596 	 * otherwise. */
1597 	if (!can_do_reliable_discards(device))
1598 		peer_req->flags |= EE_ZEROOUT;
1599 
1600 	if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector,
1601 	    peer_req->i.size >> 9, peer_req->flags & (EE_ZEROOUT|EE_TRIM)))
1602 		peer_req->flags |= EE_WAS_ERROR;
1603 	drbd_endio_write_sec_final(peer_req);
1604 }
1605 
1606 /**
1607  * drbd_submit_peer_request() - submit a peer request to the local disk
1608  * @device:	DRBD device.
1609  * @peer_req:	peer request
1610  * @op:		request operation (REQ_OP_*)
1611  * @op_flags:	request flags to be combined with @op
1612  * @fault_type:	DRBD fault insertion type (DRBD_FAULT_*)
1613  *
1614  * May spread the pages to multiple bios, depending on bio_add_page restrictions.
1615  * Returns 0 if all bios have been submitted, -ENOMEM if we could not allocate
1616  * enough bios, -ENOSPC (any better suggestion?) if we have not been able to
1617  * bio_add_page a single page to an empty bio (which should never happen and
1618  * likely indicates that the lower level IO stack is in some way broken). This
1619  * has been observed on certain Xen deployments.
1620  */
1621 /* TODO allocate from our own bio_set. */
1622 int drbd_submit_peer_request(struct drbd_device *device,
1623 			     struct drbd_peer_request *peer_req,
1624 			     const unsigned op, const unsigned op_flags,
1625 			     const int fault_type)
1626 {
1627 	struct bio *bios = NULL;
1628 	struct bio *bio;
1629 	struct page *page = peer_req->pages;
1630 	sector_t sector = peer_req->i.sector;
1631 	unsigned data_size = peer_req->i.size;
1632 	unsigned n_bios = 0;
1633 	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1634 
1635 	/* TRIM/DISCARD: for now, always use the helper function
1636 	 * blkdev_issue_zeroout(..., discard=true).
1637 	 * It's synchronous, but it does the right thing wrt. bio splitting.
1638 	 * Correctness first, performance later.  Next step is to code an
1639 	 * asynchronous variant of the same.
1640 	 */
1641 	if (peer_req->flags & (EE_TRIM | EE_ZEROOUT)) {
1642 		/* wait for all pending IO completions, before we start
1643 		 * zeroing things out. */
1644 		conn_wait_active_ee_empty(peer_req->peer_device->connection);
1645 		/* add it to the active list now,
1646 		 * so we can find it to present it in debugfs */
1647 		peer_req->submit_jif = jiffies;
1648 		peer_req->flags |= EE_SUBMITTED;
1649 
1650 		/* If this was a resync request from receive_rs_deallocated(),
1651 		 * it is already on the sync_ee list */
1652 		if (list_empty(&peer_req->w.list)) {
1653 			spin_lock_irq(&device->resource->req_lock);
1654 			list_add_tail(&peer_req->w.list, &device->active_ee);
1655 			spin_unlock_irq(&device->resource->req_lock);
1656 		}
1657 
1658 		drbd_issue_peer_discard_or_zero_out(device, peer_req);
1659 		return 0;
1660 	}
1661 
1662 	/* In most cases, we will only need one bio.  But in case the lower
1663 	 * level restrictions happen to be different at this offset on this
1664 	 * side than those of the sending peer, we may need to submit the
1665 	 * request in more than one bio.
1666 	 *
1667 	 * Plain bio_alloc is good enough here, this is no DRBD internally
1668 	 * generated bio, but a bio allocated on behalf of the peer.
1669 	 */
1670 next_bio:
1671 	bio = bio_alloc(device->ldev->backing_bdev, nr_pages, op | op_flags,
1672 			GFP_NOIO);
1673 	/* > peer_req->i.sector, unless this is the first bio */
1674 	bio->bi_iter.bi_sector = sector;
1675 	bio->bi_private = peer_req;
1676 	bio->bi_end_io = drbd_peer_request_endio;
1677 
1678 	bio->bi_next = bios;
1679 	bios = bio;
1680 	++n_bios;
1681 
1682 	page_chain_for_each(page) {
1683 		unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
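		/* If this bio is already full with respect to the restrictions
		 * of the backing device, chain a new bio and retry adding this
		 * page there. */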
1684 		if (!bio_add_page(bio, page, len, 0))
1685 			goto next_bio;
1686 		data_size -= len;
1687 		sector += len >> 9;
1688 		--nr_pages;
1689 	}
1690 	D_ASSERT(device, data_size == 0);
1691 	D_ASSERT(device, page == NULL);
1692 
1693 	atomic_set(&peer_req->pending_bios, n_bios);
1694 	/* for debugfs: update timestamp, mark as submitted */
1695 	peer_req->submit_jif = jiffies;
1696 	peer_req->flags |= EE_SUBMITTED;
1697 	do {
1698 		bio = bios;
1699 		bios = bios->bi_next;
1700 		bio->bi_next = NULL;
1701 
1702 		drbd_submit_bio_noacct(device, fault_type, bio);
1703 	} while (bios);
1704 	return 0;
1705 }
1706 
1707 static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
1708 					     struct drbd_peer_request *peer_req)
1709 {
1710 	struct drbd_interval *i = &peer_req->i;
1711 
1712 	drbd_remove_interval(&device->write_requests, i);
1713 	drbd_clear_interval(i);
1714 
1715 	/* Wake up any processes waiting for this peer request to complete.  */
1716 	if (i->waiting)
1717 		wake_up(&device->misc_wait);
1718 }
1719 
1720 static void conn_wait_active_ee_empty(struct drbd_connection *connection)
1721 {
1722 	struct drbd_peer_device *peer_device;
1723 	int vnr;
1724 
1725 	rcu_read_lock();
1726 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1727 		struct drbd_device *device = peer_device->device;
1728 
1729 		kref_get(&device->kref);
1730 		rcu_read_unlock();
1731 		drbd_wait_ee_list_empty(device, &device->active_ee);
1732 		kref_put(&device->kref, drbd_destroy_device);
1733 		rcu_read_lock();
1734 	}
1735 	rcu_read_unlock();
1736 }
1737 
1738 static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi)
1739 {
1740 	int rv;
1741 	struct p_barrier *p = pi->data;
1742 	struct drbd_epoch *epoch;
1743 
1744 	/* FIXME these are unacked on connection,
1745 	 * not a specific (peer)device.
1746 	 */
1747 	connection->current_epoch->barrier_nr = p->barrier;
1748 	connection->current_epoch->connection = connection;
1749 	rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR);
1750 
1751 	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1752 	 * the activity log, which means it would not be resynced in case the
1753 	 * R_PRIMARY crashes now.
1754 	 * Therefore we must send the barrier_ack after the barrier request was
1755 	 * completed. */
1756 	switch (connection->resource->write_ordering) {
1757 	case WO_NONE:
1758 		if (rv == FE_RECYCLED)
1759 			return 0;
1760 
1761 		/* receiver context, in the writeout path of the other node.
1762 		 * avoid potential distributed deadlock */
1763 		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1764 		if (epoch)
1765 			break;
1766 		else
1767 			drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
1768 		fallthrough;
1769 
1770 	case WO_BDEV_FLUSH:
1771 	case WO_DRAIN_IO:
1772 		conn_wait_active_ee_empty(connection);
1773 		drbd_flush(connection);
1774 
1775 		if (atomic_read(&connection->current_epoch->epoch_size)) {
1776 			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1777 			if (epoch)
1778 				break;
1779 		}
1780 
1781 		return 0;
1782 	default:
1783 		drbd_err(connection, "Strangeness in resource->write_ordering %d\n",
1784 			 connection->resource->write_ordering);
1785 		return -EIO;
1786 	}
1787 
1788 	epoch->flags = 0;
1789 	atomic_set(&epoch->epoch_size, 0);
1790 	atomic_set(&epoch->active, 0);
1791 
1792 	spin_lock(&connection->epoch_lock);
1793 	if (atomic_read(&connection->current_epoch->epoch_size)) {
1794 		list_add(&epoch->list, &connection->current_epoch->list);
1795 		connection->current_epoch = epoch;
1796 		connection->epochs++;
1797 	} else {
1798 		/* The current_epoch got recycled while we allocated this one... */
1799 		kfree(epoch);
1800 	}
1801 	spin_unlock(&connection->epoch_lock);
1802 
1803 	return 0;
1804 }
1805 
1806 /* quick wrapper in case payload size != request_size (write same) */
1807 static void drbd_csum_ee_size(struct crypto_shash *h,
1808 			      struct drbd_peer_request *r, void *d,
1809 			      unsigned int payload_size)
1810 {
1811 	unsigned int tmp = r->i.size;
1812 	r->i.size = payload_size;
1813 	drbd_csum_ee(h, r, d);
1814 	r->i.size = tmp;
1815 }
1816 
1817 /* used from receive_RSDataReply (recv_resync_read)
1818  * and from receive_Data.
1819  * data_size: actual payload ("data in")
1820  * 	for normal writes that is bi_size.
1821  * 	for discards, that is zero.
1822  * 	for write same, it is logical_block_size.
1823  * both trim and write same have the bi_size ("data len to be affected")
1824  * as extra argument in the packet header.
1825  */
1826 static struct drbd_peer_request *
1827 read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
1828 	      struct packet_info *pi) __must_hold(local)
1829 {
1830 	struct drbd_device *device = peer_device->device;
1831 	const sector_t capacity = get_capacity(device->vdisk);
1832 	struct drbd_peer_request *peer_req;
1833 	struct page *page;
1834 	int digest_size, err;
1835 	unsigned int data_size = pi->size, ds;
1836 	void *dig_in = peer_device->connection->int_dig_in;
1837 	void *dig_vv = peer_device->connection->int_dig_vv;
1838 	unsigned long *data;
1839 	struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL;
1840 	struct p_trim *zeroes = (pi->cmd == P_ZEROES) ? pi->data : NULL;
1841 
1842 	digest_size = 0;
1843 	if (!trim && peer_device->connection->peer_integrity_tfm) {
1844 		digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
1845 		/*
1846 		 * FIXME: Receive the incoming digest into the receive buffer
1847 		 *	  here, together with its struct p_data?
1848 		 */
1849 		err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
1850 		if (err)
1851 			return NULL;
1852 		data_size -= digest_size;
1853 	}
1854 
1855 	/* assume request_size == data_size, but special case trim. */
1856 	ds = data_size;
1857 	if (trim) {
1858 		if (!expect(data_size == 0))
1859 			return NULL;
1860 		ds = be32_to_cpu(trim->size);
1861 	} else if (zeroes) {
1862 		if (!expect(data_size == 0))
1863 			return NULL;
1864 		ds = be32_to_cpu(zeroes->size);
1865 	}
1866 
1867 	if (!expect(IS_ALIGNED(ds, 512)))
1868 		return NULL;
1869 	if (trim || zeroes) {
1870 		if (!expect(ds <= (DRBD_MAX_BBIO_SECTORS << 9)))
1871 			return NULL;
1872 	} else if (!expect(ds <= DRBD_MAX_BIO_SIZE))
1873 		return NULL;
1874 
1875 	/* even though we trust our peer,
1876 	 * we sometimes have to double check. */
1877 	if (sector + (ds>>9) > capacity) {
1878 		drbd_err(device, "request from peer beyond end of local disk: "
1879 			"capacity: %llus < sector: %llus + size: %u\n",
1880 			(unsigned long long)capacity,
1881 			(unsigned long long)sector, ds);
1882 		return NULL;
1883 	}
1884 
1885 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1886 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
1887 	 * which in turn might block on the other node at this very place.  */
1888 	peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO);
1889 	if (!peer_req)
1890 		return NULL;
1891 
1892 	peer_req->flags |= EE_WRITE;
1893 	if (trim) {
1894 		peer_req->flags |= EE_TRIM;
1895 		return peer_req;
1896 	}
1897 	if (zeroes) {
1898 		peer_req->flags |= EE_ZEROOUT;
1899 		return peer_req;
1900 	}
1901 
1902 	/* receive payload size bytes into page chain */
1903 	ds = data_size;
1904 	page = peer_req->pages;
1905 	page_chain_for_each(page) {
1906 		unsigned len = min_t(int, ds, PAGE_SIZE);
1907 		data = kmap(page);
1908 		err = drbd_recv_all_warn(peer_device->connection, data, len);
1909 		if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
1910 			drbd_err(device, "Fault injection: Corrupting data on receive\n");
1911 			data[0] = data[0] ^ (unsigned long)-1;
1912 		}
1913 		kunmap(page);
1914 		if (err) {
1915 			drbd_free_peer_req(device, peer_req);
1916 			return NULL;
1917 		}
1918 		ds -= len;
1919 	}
1920 
1921 	if (digest_size) {
1922 		drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size);
1923 		if (memcmp(dig_in, dig_vv, digest_size)) {
1924 			drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
1925 				(unsigned long long)sector, data_size);
1926 			drbd_free_peer_req(device, peer_req);
1927 			return NULL;
1928 		}
1929 	}
1930 	device->recv_cnt += data_size >> 9;
1931 	return peer_req;
1932 }
1933 
1934 /* drbd_drain_block() just takes a data block
1935  * out of the socket input buffer, and discards it.
1936  */
1937 static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
1938 {
1939 	struct page *page;
1940 	int err = 0;
1941 	void *data;
1942 
1943 	if (!data_size)
1944 		return 0;
1945 
1946 	page = drbd_alloc_pages(peer_device, 1, 1);
1947 
1948 	data = kmap(page);
1949 	while (data_size) {
1950 		unsigned int len = min_t(int, data_size, PAGE_SIZE);
1951 
1952 		err = drbd_recv_all_warn(peer_device->connection, data, len);
1953 		if (err)
1954 			break;
1955 		data_size -= len;
1956 	}
1957 	kunmap(page);
1958 	drbd_free_pages(peer_device->device, page, 0);
1959 	return err;
1960 }
1961 
1962 static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req,
1963 			   sector_t sector, int data_size)
1964 {
1965 	struct bio_vec bvec;
1966 	struct bvec_iter iter;
1967 	struct bio *bio;
1968 	int digest_size, err, expect;
1969 	void *dig_in = peer_device->connection->int_dig_in;
1970 	void *dig_vv = peer_device->connection->int_dig_vv;
1971 
1972 	digest_size = 0;
1973 	if (peer_device->connection->peer_integrity_tfm) {
1974 		digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
1975 		err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
1976 		if (err)
1977 			return err;
1978 		data_size -= digest_size;
1979 	}
1980 
1981 	/* optimistically update recv_cnt.  if receiving fails below,
1982 	 * we disconnect anyways, and counters will be reset. */
1983 	peer_device->device->recv_cnt += data_size>>9;
1984 
1985 	bio = req->master_bio;
1986 	D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector);
1987 
1988 	bio_for_each_segment(bvec, bio, iter) {
1989 		void *mapped = bvec_kmap_local(&bvec);
1990 		expect = min_t(int, data_size, bvec.bv_len);
1991 		err = drbd_recv_all_warn(peer_device->connection, mapped, expect);
1992 		kunmap_local(mapped);
1993 		if (err)
1994 			return err;
1995 		data_size -= expect;
1996 	}
1997 
1998 	if (digest_size) {
1999 		drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
2000 		if (memcmp(dig_in, dig_vv, digest_size)) {
2001 			drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n");
2002 			return -EINVAL;
2003 		}
2004 	}
2005 
2006 	D_ASSERT(peer_device->device, data_size == 0);
2007 	return 0;
2008 }
2009 
2010 /*
2011  * e_end_resync_block() is called in ack_sender context via
2012  * drbd_finish_peer_reqs().
2013  */
2014 static int e_end_resync_block(struct drbd_work *w, int unused)
2015 {
2016 	struct drbd_peer_request *peer_req =
2017 		container_of(w, struct drbd_peer_request, w);
2018 	struct drbd_peer_device *peer_device = peer_req->peer_device;
2019 	struct drbd_device *device = peer_device->device;
2020 	sector_t sector = peer_req->i.sector;
2021 	int err;
2022 
2023 	D_ASSERT(device, drbd_interval_empty(&peer_req->i));
2024 
2025 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
2026 		drbd_set_in_sync(device, sector, peer_req->i.size);
2027 		err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req);
2028 	} else {
2029 		/* Record failure to sync */
2030 		drbd_rs_failed_io(device, sector, peer_req->i.size);
2031 
2032 		err  = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
2033 	}
2034 	dec_unacked(device);
2035 
2036 	return err;
2037 }
2038 
2039 static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector,
2040 			    struct packet_info *pi) __releases(local)
2041 {
2042 	struct drbd_device *device = peer_device->device;
2043 	struct drbd_peer_request *peer_req;
2044 
2045 	peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi);
2046 	if (!peer_req)
2047 		goto fail;
2048 
2049 	dec_rs_pending(device);
2050 
2051 	inc_unacked(device);
2052 	/* corresponding dec_unacked() in e_end_resync_block()
2053 	 * respectively in _drbd_clear_done_ee() */
2054 
2055 	peer_req->w.cb = e_end_resync_block;
2056 	peer_req->submit_jif = jiffies;
2057 
2058 	spin_lock_irq(&device->resource->req_lock);
2059 	list_add_tail(&peer_req->w.list, &device->sync_ee);
2060 	spin_unlock_irq(&device->resource->req_lock);
2061 
2062 	atomic_add(pi->size >> 9, &device->rs_sect_ev);
2063 	if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
2064 				     DRBD_FAULT_RS_WR) == 0)
2065 		return 0;
2066 
2067 	/* don't care for the reason here */
2068 	drbd_err(device, "submit failed, triggering re-connect\n");
2069 	spin_lock_irq(&device->resource->req_lock);
2070 	list_del(&peer_req->w.list);
2071 	spin_unlock_irq(&device->resource->req_lock);
2072 
2073 	drbd_free_peer_req(device, peer_req);
2074 fail:
2075 	put_ldev(device);
2076 	return -EIO;
2077 }
2078 
2079 static struct drbd_request *
2080 find_request(struct drbd_device *device, struct rb_root *root, u64 id,
2081 	     sector_t sector, bool missing_ok, const char *func)
2082 {
2083 	struct drbd_request *req;
2084 
2085 	/* Request object according to our peer */
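	/* The 64-bit block_id we sent to the peer is the pointer value of
	 * our request object, echoed back unchanged.  It is only trusted
	 * after drbd_contains_interval() confirms that exactly this
	 * interval is present in the tree at the given sector; that check
	 * compares addresses and never dereferences the raw pointer. */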
2086 	req = (struct drbd_request *)(unsigned long)id;
2087 	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
2088 		return req;
2089 	if (!missing_ok) {
2090 		drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func,
2091 			(unsigned long)id, (unsigned long long)sector);
2092 	}
2093 	return NULL;
2094 }
2095 
2096 static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi)
2097 {
2098 	struct drbd_peer_device *peer_device;
2099 	struct drbd_device *device;
2100 	struct drbd_request *req;
2101 	sector_t sector;
2102 	int err;
2103 	struct p_data *p = pi->data;
2104 
2105 	peer_device = conn_peer_device(connection, pi->vnr);
2106 	if (!peer_device)
2107 		return -EIO;
2108 	device = peer_device->device;
2109 
2110 	sector = be64_to_cpu(p->sector);
2111 
2112 	spin_lock_irq(&device->resource->req_lock);
2113 	req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
2114 	spin_unlock_irq(&device->resource->req_lock);
2115 	if (unlikely(!req))
2116 		return -EIO;
2117 
2118 	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
2119 	 * special casing it there for the various failure cases.
2120 	 * still no race with drbd_fail_pending_reads */
2121 	err = recv_dless_read(peer_device, req, sector, pi->size);
2122 	if (!err)
2123 		req_mod(req, DATA_RECEIVED);
2124 	/* else: nothing. handled from drbd_disconnect...
2125 	 * I don't think we may complete this just yet
2126 	 * in case we are "on-disconnect: freeze" */
2127 
2128 	return err;
2129 }
2130 
2131 static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi)
2132 {
2133 	struct drbd_peer_device *peer_device;
2134 	struct drbd_device *device;
2135 	sector_t sector;
2136 	int err;
2137 	struct p_data *p = pi->data;
2138 
2139 	peer_device = conn_peer_device(connection, pi->vnr);
2140 	if (!peer_device)
2141 		return -EIO;
2142 	device = peer_device->device;
2143 
2144 	sector = be64_to_cpu(p->sector);
2145 	D_ASSERT(device, p->block_id == ID_SYNCER);
2146 
2147 	if (get_ldev(device)) {
2148 		/* data is submitted to disk within recv_resync_read.
2149 		 * corresponding put_ldev done below on error,
2150 		 * or in drbd_peer_request_endio. */
2151 		err = recv_resync_read(peer_device, sector, pi);
2152 	} else {
2153 		if (__ratelimit(&drbd_ratelimit_state))
2154 			drbd_err(device, "Can not write resync data to local disk.\n");
2155 
2156 		err = drbd_drain_block(peer_device, pi->size);
2157 
2158 		drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
2159 	}
2160 
2161 	atomic_add(pi->size >> 9, &device->rs_sect_in);
2162 
2163 	return err;
2164 }
2165 
2166 static void restart_conflicting_writes(struct drbd_device *device,
2167 				       sector_t sector, int size)
2168 {
2169 	struct drbd_interval *i;
2170 	struct drbd_request *req;
2171 
2172 	drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2173 		if (!i->local)
2174 			continue;
2175 		req = container_of(i, struct drbd_request, i);
2176 		if (req->rq_state & RQ_LOCAL_PENDING ||
2177 		    !(req->rq_state & RQ_POSTPONED))
2178 			continue;
2179 		/* as it is RQ_POSTPONED, this will cause it to
2180 		 * be queued on the retry workqueue. */
2181 		__req_mod(req, CONFLICT_RESOLVED, NULL);
2182 	}
2183 }
2184 
2185 /*
2186  * e_end_block() is called in ack_sender context via drbd_finish_peer_reqs().
2187  */
2188 static int e_end_block(struct drbd_work *w, int cancel)
2189 {
2190 	struct drbd_peer_request *peer_req =
2191 		container_of(w, struct drbd_peer_request, w);
2192 	struct drbd_peer_device *peer_device = peer_req->peer_device;
2193 	struct drbd_device *device = peer_device->device;
2194 	sector_t sector = peer_req->i.sector;
2195 	int err = 0, pcmd;
2196 
2197 	if (peer_req->flags & EE_SEND_WRITE_ACK) {
2198 		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
2199 			pcmd = (device->state.conn >= C_SYNC_SOURCE &&
2200 				device->state.conn <= C_PAUSED_SYNC_T &&
2201 				peer_req->flags & EE_MAY_SET_IN_SYNC) ?
2202 				P_RS_WRITE_ACK : P_WRITE_ACK;
2203 			err = drbd_send_ack(peer_device, pcmd, peer_req);
2204 			if (pcmd == P_RS_WRITE_ACK)
2205 				drbd_set_in_sync(device, sector, peer_req->i.size);
2206 		} else {
2207 			err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req);
2208 			/* we expect it to be marked out of sync anyways...
2209 			 * maybe assert this?  */
2210 		}
2211 		dec_unacked(device);
2212 	}
2213 
2214 	/* we delete from the conflict detection hash _after_ we sent out the
2215 	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right.  */
2216 	if (peer_req->flags & EE_IN_INTERVAL_TREE) {
2217 		spin_lock_irq(&device->resource->req_lock);
2218 		D_ASSERT(device, !drbd_interval_empty(&peer_req->i));
2219 		drbd_remove_epoch_entry_interval(device, peer_req);
2220 		if (peer_req->flags & EE_RESTART_REQUESTS)
2221 			restart_conflicting_writes(device, sector, peer_req->i.size);
2222 		spin_unlock_irq(&device->resource->req_lock);
2223 	} else
2224 		D_ASSERT(device, drbd_interval_empty(&peer_req->i));
2225 
2226 	drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
2227 
2228 	return err;
2229 }
2230 
2231 static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
2232 {
2233 	struct drbd_peer_request *peer_req =
2234 		container_of(w, struct drbd_peer_request, w);
2235 	struct drbd_peer_device *peer_device = peer_req->peer_device;
2236 	int err;
2237 
2238 	err = drbd_send_ack(peer_device, ack, peer_req);
2239 	dec_unacked(peer_device->device);
2240 
2241 	return err;
2242 }
2243 
2244 static int e_send_superseded(struct drbd_work *w, int unused)
2245 {
2246 	return e_send_ack(w, P_SUPERSEDED);
2247 }
2248 
2249 static int e_send_retry_write(struct drbd_work *w, int unused)
2250 {
2251 	struct drbd_peer_request *peer_req =
2252 		container_of(w, struct drbd_peer_request, w);
2253 	struct drbd_connection *connection = peer_req->peer_device->connection;
2254 
2255 	return e_send_ack(w, connection->agreed_pro_version >= 100 ?
2256 			     P_RETRY_WRITE : P_SUPERSEDED);
2257 }
2258 
2259 static bool seq_greater(u32 a, u32 b)
2260 {
2261 	/*
2262 	 * We assume 32-bit wrap-around here.
2263 	 * For 24-bit wrap-around, we would have to shift:
2264 	 *  a <<= 8; b <<= 8;
2265 	 */
2266 	return (s32)a - (s32)b > 0;
2267 }
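/* Worked example (illustration only): with a == 0x00000002 and
 * b == 0xfffffffe, (s32)a - (s32)b == 2 - (-2) == 4 > 0, so "a" counts
 * as greater even though the counter wrapped in between; the reverse
 * call computes -4 > 0 and yields false. */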
2268 
2269 static u32 seq_max(u32 a, u32 b)
2270 {
2271 	return seq_greater(a, b) ? a : b;
2272 }
2273 
2274 static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq)
2275 {
2276 	struct drbd_device *device = peer_device->device;
2277 	unsigned int newest_peer_seq;
2278 
2279 	if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) {
2280 		spin_lock(&device->peer_seq_lock);
2281 		newest_peer_seq = seq_max(device->peer_seq, peer_seq);
2282 		device->peer_seq = newest_peer_seq;
2283 		spin_unlock(&device->peer_seq_lock);
2284 		/* wake up only if we actually changed device->peer_seq */
2285 		if (peer_seq == newest_peer_seq)
2286 			wake_up(&device->seq_wait);
2287 	}
2288 }
2289 
2290 static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
2291 {
2292 	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
2293 }
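/* Example (illustration only): s1/s2 are sector numbers, l1/l2 byte
 * lengths.  overlaps(0, 4096, 8, 512) is false: the first extent covers
 * sectors 0..7 and ends exactly where sector 8 begins.
 * overlaps(0, 4096, 7, 512) is true: sector 7 lies within 0..7. */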
2294 
2295 /* maybe change sync_ee into interval trees as well? */
2296 static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
2297 {
2298 	struct drbd_peer_request *rs_req;
2299 	bool rv = false;
2300 
2301 	spin_lock_irq(&device->resource->req_lock);
2302 	list_for_each_entry(rs_req, &device->sync_ee, w.list) {
2303 		if (overlaps(peer_req->i.sector, peer_req->i.size,
2304 			     rs_req->i.sector, rs_req->i.size)) {
2305 			rv = true;
2306 			break;
2307 		}
2308 	}
2309 	spin_unlock_irq(&device->resource->req_lock);
2310 
2311 	return rv;
2312 }
2313 
2314 /* Called from receive_Data.
2315  * Synchronize packets on sock with packets on msock.
2316  *
2317  * This is here so even when a P_DATA packet traveling via sock overtook an Ack
2318  * packet traveling on msock, they are still processed in the order they have
2319  * been sent.
2320  *
2321  * Note: we don't care for Ack packets overtaking P_DATA packets.
2322  *
2323  * In case packet_seq is larger than device->peer_seq number, there are
2324  * outstanding packets on the msock. We wait for them to arrive.
2325  * In case we are the logically next packet, we update device->peer_seq
2326  * ourselves. Correctly handles 32bit wrap around.
2327  *
2328  * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
2329  * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
2330  * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
2331  * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
2332  *
2333  * returns 0 if we may process the packet,
2334  * -ERESTARTSYS if we were interrupted (by disconnect signal). */
2335 static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq)
2336 {
2337 	struct drbd_device *device = peer_device->device;
2338 	DEFINE_WAIT(wait);
2339 	long timeout;
2340 	int ret = 0, tp;
2341 
2342 	if (!test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags))
2343 		return 0;
2344 
2345 	spin_lock(&device->peer_seq_lock);
2346 	for (;;) {
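		/* Illustration: if device->peer_seq == 41, a packet with
		 * peer_seq == 42 is the logically next one and may proceed
		 * (and bumps peer_seq below); one with peer_seq == 43 must
		 * wait until 42 has been processed. */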
2347 		if (!seq_greater(peer_seq - 1, device->peer_seq)) {
2348 			device->peer_seq = seq_max(device->peer_seq, peer_seq);
2349 			break;
2350 		}
2351 
2352 		if (signal_pending(current)) {
2353 			ret = -ERESTARTSYS;
2354 			break;
2355 		}
2356 
2357 		rcu_read_lock();
2358 		tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries;
2359 		rcu_read_unlock();
2360 
2361 		if (!tp)
2362 			break;
2363 
2364 		/* Only need to wait if two_primaries is enabled */
2365 		prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
2366 		spin_unlock(&device->peer_seq_lock);
2367 		rcu_read_lock();
2368 		timeout = rcu_dereference(peer_device->connection->net_conf)->ping_timeo*HZ/10;
2369 		rcu_read_unlock();
2370 		timeout = schedule_timeout(timeout);
2371 		spin_lock(&device->peer_seq_lock);
2372 		if (!timeout) {
2373 			ret = -ETIMEDOUT;
2374 			drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n");
2375 			break;
2376 		}
2377 	}
2378 	spin_unlock(&device->peer_seq_lock);
2379 	finish_wait(&device->seq_wait, &wait);
2380 	return ret;
2381 }
2382 
2383 /* see also bio_flags_to_wire().
2384  * We need to map the wire flags semantically to bio (REQ_*) flags and back,
2385  * because the peer may be running a different kernel version. */
2386 static unsigned long wire_flags_to_bio_flags(u32 dpf)
2387 {
2388 	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2389 		(dpf & DP_FUA ? REQ_FUA : 0) |
2390 		(dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
2391 }
2392 
2393 static unsigned long wire_flags_to_bio_op(u32 dpf)
2394 {
2395 	if (dpf & DP_ZEROES)
2396 		return REQ_OP_WRITE_ZEROES;
2397 	if (dpf & DP_DISCARD)
2398 		return REQ_OP_DISCARD;
2399 	else
2400 		return REQ_OP_WRITE;
2401 }
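/* Illustration: dpf == (DP_DISCARD | DP_FUA) maps to op REQ_OP_DISCARD
 * with flag REQ_FUA; a plain write with dp_flags == 0 becomes
 * REQ_OP_WRITE with no extra flags. */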
2402 
2403 static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
2404 				    unsigned int size)
2405 {
2406 	struct drbd_interval *i;
2407 
2408     repeat:
2409 	drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2410 		struct drbd_request *req;
2411 		struct bio_and_error m;
2412 
2413 		if (!i->local)
2414 			continue;
2415 		req = container_of(i, struct drbd_request, i);
2416 		if (!(req->rq_state & RQ_POSTPONED))
2417 			continue;
2418 		req->rq_state &= ~RQ_POSTPONED;
2419 		__req_mod(req, NEG_ACKED, &m);
2420 		spin_unlock_irq(&device->resource->req_lock);
2421 		if (m.bio)
2422 			complete_master_bio(device, &m);
2423 		spin_lock_irq(&device->resource->req_lock);
2424 		goto repeat;
2425 	}
2426 }
2427 
2428 static int handle_write_conflicts(struct drbd_device *device,
2429 				  struct drbd_peer_request *peer_req)
2430 {
2431 	struct drbd_connection *connection = peer_req->peer_device->connection;
2432 	bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags);
2433 	sector_t sector = peer_req->i.sector;
2434 	const unsigned int size = peer_req->i.size;
2435 	struct drbd_interval *i;
2436 	bool equal;
2437 	int err;
2438 
2439 	/*
2440 	 * Inserting the peer request into the write_requests tree will prevent
2441 	 * new conflicting local requests from being added.
2442 	 */
2443 	drbd_insert_interval(&device->write_requests, &peer_req->i);
2444 
2445     repeat:
2446 	drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2447 		if (i == &peer_req->i)
2448 			continue;
2449 		if (i->completed)
2450 			continue;
2451 
2452 		if (!i->local) {
2453 			/*
2454 			 * Our peer has sent a conflicting remote request; this
2455 			 * should not happen in a two-node setup.  Wait for the
2456 			 * earlier peer request to complete.
2457 			 */
2458 			err = drbd_wait_misc(device, i);
2459 			if (err)
2460 				goto out;
2461 			goto repeat;
2462 		}
2463 
2464 		equal = i->sector == sector && i->size == size;
2465 		if (resolve_conflicts) {
2466 			/*
2467 			 * If the peer request is fully contained within the
2468 			 * overlapping request, it can be considered overwritten
2469 			 * and thus superseded; otherwise, it will be retried
2470 			 * once all overlapping requests have completed.
2471 			 */
2472 			bool superseded = i->sector <= sector && i->sector +
2473 				       (i->size >> 9) >= sector + (size >> 9);
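			/* Illustration: a local write i covering sectors
			 * 96..127 fully contains a peer write covering
			 * 100..107, so the peer write is "superseded";
			 * a peer write covering 120..135 sticks out past
			 * 127 and will be retried instead. */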
2474 
2475 			if (!equal)
2476 				drbd_alert(device, "Concurrent writes detected: "
2477 					       "local=%llus +%u, remote=%llus +%u, "
2478 					       "assuming %s came first\n",
2479 					  (unsigned long long)i->sector, i->size,
2480 					  (unsigned long long)sector, size,
2481 					  superseded ? "local" : "remote");
2482 
2483 			peer_req->w.cb = superseded ? e_send_superseded :
2484 						   e_send_retry_write;
2485 			list_add_tail(&peer_req->w.list, &device->done_ee);
2486 			queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
2487 
2488 			err = -ENOENT;
2489 			goto out;
2490 		} else {
2491 			struct drbd_request *req =
2492 				container_of(i, struct drbd_request, i);
2493 
2494 			if (!equal)
2495 				drbd_alert(device, "Concurrent writes detected: "
2496 					       "local=%llus +%u, remote=%llus +%u\n",
2497 					  (unsigned long long)i->sector, i->size,
2498 					  (unsigned long long)sector, size);
2499 
2500 			if (req->rq_state & RQ_LOCAL_PENDING ||
2501 			    !(req->rq_state & RQ_POSTPONED)) {
2502 				/*
2503 				 * Wait for the node with the discard flag to
2504 				 * decide if this request has been superseded
2505 				 * or needs to be retried.
2506 				 * Requests that have been superseded will
2507 				 * disappear from the write_requests tree.
2508 				 *
2509 				 * In addition, wait for the conflicting
2510 				 * request to finish locally before submitting
2511 				 * the conflicting peer request.
2512 				 */
2513 				err = drbd_wait_misc(device, &req->i);
2514 				if (err) {
2515 					_conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
2516 					fail_postponed_requests(device, sector, size);
2517 					goto out;
2518 				}
2519 				goto repeat;
2520 			}
2521 			/*
2522 			 * Remember to restart the conflicting requests after
2523 			 * the new peer request has completed.
2524 			 */
2525 			peer_req->flags |= EE_RESTART_REQUESTS;
2526 		}
2527 	}
2528 	err = 0;
2529 
2530     out:
2531 	if (err)
2532 		drbd_remove_epoch_entry_interval(device, peer_req);
2533 	return err;
2534 }
2535 
2536 /* mirrored write */
2537 static int receive_Data(struct drbd_connection *connection, struct packet_info *pi)
2538 {
2539 	struct drbd_peer_device *peer_device;
2540 	struct drbd_device *device;
2541 	struct net_conf *nc;
2542 	sector_t sector;
2543 	struct drbd_peer_request *peer_req;
2544 	struct p_data *p = pi->data;
2545 	u32 peer_seq = be32_to_cpu(p->seq_num);
2546 	int op, op_flags;
2547 	u32 dp_flags;
2548 	int err, tp;
2549 
2550 	peer_device = conn_peer_device(connection, pi->vnr);
2551 	if (!peer_device)
2552 		return -EIO;
2553 	device = peer_device->device;
2554 
2555 	if (!get_ldev(device)) {
2556 		int err2;
2557 
2558 		err = wait_for_and_update_peer_seq(peer_device, peer_seq);
2559 		drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
2560 		atomic_inc(&connection->current_epoch->epoch_size);
2561 		err2 = drbd_drain_block(peer_device, pi->size);
2562 		if (!err)
2563 			err = err2;
2564 		return err;
2565 	}
2566 
2567 	/*
2568 	 * Corresponding put_ldev done either below (on various errors), or in
2569 	 * drbd_peer_request_endio, if we successfully submit the data at the
2570 	 * end of this function.
2571 	 */
2572 
2573 	sector = be64_to_cpu(p->sector);
2574 	peer_req = read_in_block(peer_device, p->block_id, sector, pi);
2575 	if (!peer_req) {
2576 		put_ldev(device);
2577 		return -EIO;
2578 	}
2579 
2580 	peer_req->w.cb = e_end_block;
2581 	peer_req->submit_jif = jiffies;
2582 	peer_req->flags |= EE_APPLICATION;
2583 
2584 	dp_flags = be32_to_cpu(p->dp_flags);
2585 	op = wire_flags_to_bio_op(dp_flags);
2586 	op_flags = wire_flags_to_bio_flags(dp_flags);
2587 	if (pi->cmd == P_TRIM) {
2588 		D_ASSERT(peer_device, peer_req->i.size > 0);
2589 		D_ASSERT(peer_device, op == REQ_OP_DISCARD);
2590 		D_ASSERT(peer_device, peer_req->pages == NULL);
2591 		/* need to play safe: an older DRBD sender
2592 		 * may actually mean zero-out when sending P_TRIM. */
2593 		if (0 == (connection->agreed_features & DRBD_FF_WZEROES))
2594 			peer_req->flags |= EE_ZEROOUT;
2595 	} else if (pi->cmd == P_ZEROES) {
2596 		D_ASSERT(peer_device, peer_req->i.size > 0);
2597 		D_ASSERT(peer_device, op == REQ_OP_WRITE_ZEROES);
2598 		D_ASSERT(peer_device, peer_req->pages == NULL);
2599 		/* Do (not) pass down BLKDEV_ZERO_NOUNMAP? */
2600 		if (dp_flags & DP_DISCARD)
2601 			peer_req->flags |= EE_TRIM;
2602 	} else if (peer_req->pages == NULL) {
2603 		D_ASSERT(device, peer_req->i.size == 0);
2604 		D_ASSERT(device, dp_flags & DP_FLUSH);
2605 	}
2606 
2607 	if (dp_flags & DP_MAY_SET_IN_SYNC)
2608 		peer_req->flags |= EE_MAY_SET_IN_SYNC;
2609 
2610 	spin_lock(&connection->epoch_lock);
2611 	peer_req->epoch = connection->current_epoch;
2612 	atomic_inc(&peer_req->epoch->epoch_size);
2613 	atomic_inc(&peer_req->epoch->active);
2614 	spin_unlock(&connection->epoch_lock);
2615 
2616 	rcu_read_lock();
2617 	nc = rcu_dereference(peer_device->connection->net_conf);
2618 	tp = nc->two_primaries;
2619 	if (peer_device->connection->agreed_pro_version < 100) {
2620 		switch (nc->wire_protocol) {
2621 		case DRBD_PROT_C:
2622 			dp_flags |= DP_SEND_WRITE_ACK;
2623 			break;
2624 		case DRBD_PROT_B:
2625 			dp_flags |= DP_SEND_RECEIVE_ACK;
2626 			break;
2627 		}
2628 	}
2629 	rcu_read_unlock();
2630 
2631 	if (dp_flags & DP_SEND_WRITE_ACK) {
2632 		peer_req->flags |= EE_SEND_WRITE_ACK;
2633 		inc_unacked(device);
2634 		/* corresponding dec_unacked() in e_end_block()
2635 		 * respectively in _drbd_clear_done_ee() */
2636 	}
2637 
2638 	if (dp_flags & DP_SEND_RECEIVE_ACK) {
2639 		/* I really don't like it that the receiver thread
2640 		 * sends on the msock, but anyways */
2641 		drbd_send_ack(peer_device, P_RECV_ACK, peer_req);
2642 	}
2643 
2644 	if (tp) {
2645 		/* two primaries implies protocol C */
2646 		D_ASSERT(device, dp_flags & DP_SEND_WRITE_ACK);
2647 		peer_req->flags |= EE_IN_INTERVAL_TREE;
2648 		err = wait_for_and_update_peer_seq(peer_device, peer_seq);
2649 		if (err)
2650 			goto out_interrupted;
2651 		spin_lock_irq(&device->resource->req_lock);
2652 		err = handle_write_conflicts(device, peer_req);
2653 		if (err) {
2654 			spin_unlock_irq(&device->resource->req_lock);
2655 			if (err == -ENOENT) {
2656 				put_ldev(device);
2657 				return 0;
2658 			}
2659 			goto out_interrupted;
2660 		}
2661 	} else {
2662 		update_peer_seq(peer_device, peer_seq);
2663 		spin_lock_irq(&device->resource->req_lock);
2664 	}
2665 	/* TRIM and ZEROOUT are processed synchronously:
2666 	 * drbd_submit_peer_request() waits for all pending requests,
2667 	 * that is, for active_ee to become empty;
2668 	 * better not add ourselves here. */
2669 	if ((peer_req->flags & (EE_TRIM | EE_ZEROOUT)) == 0)
2670 		list_add_tail(&peer_req->w.list, &device->active_ee);
2671 	spin_unlock_irq(&device->resource->req_lock);
2672 
2673 	if (device->state.conn == C_SYNC_TARGET)
2674 		wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
2675 
2676 	if (device->state.pdsk < D_INCONSISTENT) {
2677 		/* In case we have the only disk of the cluster, track the write as out of sync: */
2678 		drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
2679 		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
2680 		drbd_al_begin_io(device, &peer_req->i);
2681 		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2682 	}
2683 
2684 	err = drbd_submit_peer_request(device, peer_req, op, op_flags,
2685 				       DRBD_FAULT_DT_WR);
2686 	if (!err)
2687 		return 0;
2688 
2689 	/* don't care for the reason here */
2690 	drbd_err(device, "submit failed, triggering re-connect\n");
2691 	spin_lock_irq(&device->resource->req_lock);
2692 	list_del(&peer_req->w.list);
2693 	drbd_remove_epoch_entry_interval(device, peer_req);
2694 	spin_unlock_irq(&device->resource->req_lock);
2695 	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) {
2696 		peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
2697 		drbd_al_complete_io(device, &peer_req->i);
2698 	}
2699 
2700 out_interrupted:
2701 	drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
2702 	put_ldev(device);
2703 	drbd_free_peer_req(device, peer_req);
2704 	return err;
2705 }
2706 
2707 /* We may throttle resync, if the lower device seems to be busy,
2708  * and current sync rate is above c_min_rate.
2709  *
2710  * To decide whether or not the lower device is busy, we use a scheme similar
2711  * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant"
2712  * amount (more than 64 sectors) of activity that we cannot account for with
2713  * our own resync activity, it obviously is "busy".
2714  *
2715  * The current sync rate used here uses only the most recent two step marks,
2716  * to have a short time average so we can react faster.
2717  */
2718 bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
2719 		bool throttle_if_app_is_waiting)
2720 {
2721 	struct lc_element *tmp;
2722 	bool throttle = drbd_rs_c_min_rate_throttle(device);
2723 
2724 	if (!throttle || throttle_if_app_is_waiting)
2725 		return throttle;
2726 
2727 	spin_lock_irq(&device->al_lock);
2728 	tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
2729 	if (tmp) {
2730 		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2731 		if (test_bit(BME_PRIORITY, &bm_ext->flags))
2732 			throttle = false;
2733 		/* Do not slow down if app IO is already waiting for this extent,
2734 		 * and our progress is necessary for application IO to complete. */
2735 	}
2736 	spin_unlock_irq(&device->al_lock);
2737 
2738 	return throttle;
2739 }
2740 
2741 bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
2742 {
2743 	struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
2744 	unsigned long db, dt, dbdt;
2745 	unsigned int c_min_rate;
2746 	int curr_events;
2747 
2748 	rcu_read_lock();
2749 	c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
2750 	rcu_read_unlock();
2751 
2752 	/* feature disabled? */
2753 	if (c_min_rate == 0)
2754 		return false;
2755 
2756 	curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
2757 			atomic_read(&device->rs_sect_ev);
2758 
2759 	if (atomic_read(&device->ap_actlog_cnt)
2760 	    || curr_events - device->rs_last_events > 64) {
2761 		unsigned long rs_left;
2762 		int i;
2763 
2764 		device->rs_last_events = curr_events;
2765 
2766 		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2767 		 * approx. */
2768 		i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2769 
2770 		if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
2771 			rs_left = device->ov_left;
2772 		else
2773 			rs_left = drbd_bm_total_weight(device) - device->rs_failed;
2774 
2775 		dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
2776 		if (!dt)
2777 			dt++;
2778 		db = device->rs_mark_left[i] - rs_left;
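		/* Bit2KB() converts bitmap bits to KiB; assuming the usual
		 * 4 KiB resync granularity per bitmap bit (BM_BLOCK_SHIFT
		 * == 12), that is bits << 2.  Example (illustration only):
		 * db == 51200 bits cleared over dt == 2 seconds gives
		 * dbdt == Bit2KB(25600) == 102400 KiB/s, i.e. 100 MiB/s. */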
2779 		dbdt = Bit2KB(db/dt);
2780 
2781 		if (dbdt > c_min_rate)
2782 			return true;
2783 	}
2784 	return false;
2785 }
2786 
2787 static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
2788 {
2789 	struct drbd_peer_device *peer_device;
2790 	struct drbd_device *device;
2791 	sector_t sector;
2792 	sector_t capacity;
2793 	struct drbd_peer_request *peer_req;
2794 	struct digest_info *di = NULL;
2795 	int size, verb;
2796 	unsigned int fault_type;
2797 	struct p_block_req *p =	pi->data;
2798 
2799 	peer_device = conn_peer_device(connection, pi->vnr);
2800 	if (!peer_device)
2801 		return -EIO;
2802 	device = peer_device->device;
2803 	capacity = get_capacity(device->vdisk);
2804 
2805 	sector = be64_to_cpu(p->sector);
2806 	size   = be32_to_cpu(p->blksize);
2807 
2808 	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
2809 		drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2810 				(unsigned long long)sector, size);
2811 		return -EINVAL;
2812 	}
2813 	if (sector + (size>>9) > capacity) {
2814 		drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2815 				(unsigned long long)sector, size);
2816 		return -EINVAL;
2817 	}
2818 
2819 	if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
2820 		verb = 1;
2821 		switch (pi->cmd) {
2822 		case P_DATA_REQUEST:
2823 			drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
2824 			break;
2825 		case P_RS_THIN_REQ:
2826 		case P_RS_DATA_REQUEST:
2827 		case P_CSUM_RS_REQUEST:
2828 		case P_OV_REQUEST:
2829 			drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY , p);
2830 			break;
2831 		case P_OV_REPLY:
2832 			verb = 0;
2833 			dec_rs_pending(device);
2834 			drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC);
2835 			break;
2836 		default:
2837 			BUG();
2838 		}
2839 		if (verb && __ratelimit(&drbd_ratelimit_state))
2840 			drbd_err(device, "Can not satisfy peer's read request, "
2841 			    "no local data.\n");
2842 
2843 		/* drain the payload, if any */
2844 		return drbd_drain_block(peer_device, pi->size);
2845 	}
2846 
2847 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2848 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
2849 	 * which in turn might block on the other node at this very place.  */
2850 	peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
2851 			size, GFP_NOIO);
2852 	if (!peer_req) {
2853 		put_ldev(device);
2854 		return -ENOMEM;
2855 	}
2856 
2857 	switch (pi->cmd) {
2858 	case P_DATA_REQUEST:
2859 		peer_req->w.cb = w_e_end_data_req;
2860 		fault_type = DRBD_FAULT_DT_RD;
2861 		/* application IO, don't drbd_rs_begin_io */
2862 		peer_req->flags |= EE_APPLICATION;
2863 		goto submit;
2864 
2865 	case P_RS_THIN_REQ:
2866 		/* If at some point in the future we have a smart way to
2867 		   find out if this data block is completely deallocated,
2868 		   then we would do something smarter here than reading
2869 		   the block... */
2870 		peer_req->flags |= EE_RS_THIN_REQ;
2871 		fallthrough;
2872 	case P_RS_DATA_REQUEST:
2873 		peer_req->w.cb = w_e_end_rsdata_req;
2874 		fault_type = DRBD_FAULT_RS_RD;
2875 		/* used in the sector offset progress display */
2876 		device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2877 		break;
2878 
2879 	case P_OV_REPLY:
2880 	case P_CSUM_RS_REQUEST:
2881 		fault_type = DRBD_FAULT_RS_RD;
2882 		di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
2883 		if (!di)
2884 			goto out_free_e;
2885 
2886 		di->digest_size = pi->size;
2887 		di->digest = (((char *)di)+sizeof(struct digest_info));
2888 
2889 		peer_req->digest = di;
2890 		peer_req->flags |= EE_HAS_DIGEST;
2891 
2892 		if (drbd_recv_all(peer_device->connection, di->digest, pi->size))
2893 			goto out_free_e;
2894 
2895 		if (pi->cmd == P_CSUM_RS_REQUEST) {
2896 			D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
2897 			peer_req->w.cb = w_e_end_csum_rs_req;
2898 			/* used in the sector offset progress display */
2899 			device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2900 			/* remember to report stats in drbd_resync_finished */
2901 			device->use_csums = true;
2902 		} else if (pi->cmd == P_OV_REPLY) {
2903 			/* track progress, we may need to throttle */
2904 			atomic_add(size >> 9, &device->rs_sect_in);
2905 			peer_req->w.cb = w_e_end_ov_reply;
2906 			dec_rs_pending(device);
2907 			/* drbd_rs_begin_io done when we sent this request,
2908 			 * but accounting still needs to be done. */
2909 			goto submit_for_resync;
2910 		}
2911 		break;
2912 
2913 	case P_OV_REQUEST:
2914 		if (device->ov_start_sector == ~(sector_t)0 &&
2915 		    peer_device->connection->agreed_pro_version >= 90) {
2916 			unsigned long now = jiffies;
2917 			int i;
2918 			device->ov_start_sector = sector;
2919 			device->ov_position = sector;
2920 			device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
2921 			device->rs_total = device->ov_left;
2922 			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2923 				device->rs_mark_left[i] = device->ov_left;
2924 				device->rs_mark_time[i] = now;
2925 			}
2926 			drbd_info(device, "Online Verify start sector: %llu\n",
2927 					(unsigned long long)sector);
2928 		}
2929 		peer_req->w.cb = w_e_end_ov_req;
2930 		fault_type = DRBD_FAULT_RS_RD;
2931 		break;
2932 
2933 	default:
2934 		BUG();
2935 	}
2936 
2937 	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
2938 	 * wrt the receiver, but it is not as straightforward as it may seem.
2939 	 * Various places in the resync start and stop logic assume resync
2940 	 * requests are processed in order, requeuing this on the worker thread
2941 	 * introduces a bunch of new code for synchronization between threads.
2942 	 *
2943 	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2944 	 * "forever", throttling after drbd_rs_begin_io will lock that extent
2945 	 * for application writes for the same time.  For now, just throttle
2946 	 * here, where the rest of the code expects the receiver to sleep for
2947 	 * a while, anyways.
2948 	 */
2949 
2950 	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
2951 	 * this defers syncer requests for some time, before letting at least
2952  * one request through.  The resync controller on the receiving side
2953 	 * will adapt to the incoming rate accordingly.
2954 	 *
2955 	 * We cannot throttle here if remote is Primary/SyncTarget:
2956 	 * we would also throttle its application reads.
2957 	 * In that case, throttling is done on the SyncTarget only.
2958 	 */
2959 
2960 	/* Even though this may be a resync request, we do add to "read_ee";
2961 	 * "sync_ee" is only used for resync WRITEs.
2962 	 * Add to list early, so debugfs can find this request
2963 	 * even if we have to sleep below. */
2964 	spin_lock_irq(&device->resource->req_lock);
2965 	list_add_tail(&peer_req->w.list, &device->read_ee);
2966 	spin_unlock_irq(&device->resource->req_lock);
2967 
2968 	update_receiver_timing_details(connection, drbd_rs_should_slow_down);
2969 	if (device->state.peer != R_PRIMARY
2970 	&& drbd_rs_should_slow_down(device, sector, false))
2971 		schedule_timeout_uninterruptible(HZ/10);
2972 	update_receiver_timing_details(connection, drbd_rs_begin_io);
2973 	if (drbd_rs_begin_io(device, sector))
2974 		goto out_free_e;
2975 
2976 submit_for_resync:
2977 	atomic_add(size >> 9, &device->rs_sect_ev);
2978 
2979 submit:
2980 	update_receiver_timing_details(connection, drbd_submit_peer_request);
2981 	inc_unacked(device);
2982 	if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
2983 				     fault_type) == 0)
2984 		return 0;
2985 
2986 	/* don't care for the reason here */
2987 	drbd_err(device, "submit failed, triggering re-connect\n");
2988 
2989 out_free_e:
2990 	spin_lock_irq(&device->resource->req_lock);
2991 	list_del(&peer_req->w.list);
2992 	spin_unlock_irq(&device->resource->req_lock);
2993 	/* no drbd_rs_complete_io(), we are dropping the connection anyways */
2994 
2995 	put_ldev(device);
2996 	drbd_free_peer_req(device, peer_req);
2997 	return -EIO;
2998 }
2999 
3000 /*
3001  * drbd_asb_recover_0p  -  Recover after split-brain with no remaining primaries
3002  */
3003 static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local)
3004 {
3005 	struct drbd_device *device = peer_device->device;
3006 	int self, peer, rv = -100;
3007 	unsigned long ch_self, ch_peer;
3008 	enum drbd_after_sb_p after_sb_0p;
3009 
3010 	self = device->ldev->md.uuid[UI_BITMAP] & 1;
3011 	peer = device->p_uuid[UI_BITMAP] & 1;
3012 
3013 	ch_peer = device->p_uuid[UI_SIZE];
3014 	ch_self = device->comm_bm_set;
3015 
3016 	rcu_read_lock();
3017 	after_sb_0p = rcu_dereference(peer_device->connection->net_conf)->after_sb_0p;
3018 	rcu_read_unlock();
3019 	switch (after_sb_0p) {
3020 	case ASB_CONSENSUS:
3021 	case ASB_DISCARD_SECONDARY:
3022 	case ASB_CALL_HELPER:
3023 	case ASB_VIOLENTLY:
3024 		drbd_err(device, "Configuration error.\n");
3025 		break;
3026 	case ASB_DISCONNECT:
3027 		break;
3028 	case ASB_DISCARD_YOUNGER_PRI:
3029 		if (self == 0 && peer == 1) {
3030 			rv = -1;
3031 			break;
3032 		}
3033 		if (self == 1 && peer == 0) {
3034 			rv =  1;
3035 			break;
3036 		}
3037 		fallthrough;	/* to one of the other strategies */
3038 	case ASB_DISCARD_OLDER_PRI:
3039 		if (self == 0 && peer == 1) {
3040 			rv = 1;
3041 			break;
3042 		}
3043 		if (self == 1 && peer == 0) {
3044 			rv = -1;
3045 			break;
3046 		}
3047 		/* Else fall through to one of the other strategies... */
3048 		drbd_warn(device, "Discard younger/older primary did not find a decision\n"
3049 		     "Using discard-least-changes instead\n");
3050 		fallthrough;
3051 	case ASB_DISCARD_ZERO_CHG:
3052 		if (ch_peer == 0 && ch_self == 0) {
3053 			rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
3054 				? -1 : 1;
3055 			break;
3056 		} else {
3057 			if (ch_peer == 0) { rv =  1; break; }
3058 			if (ch_self == 0) { rv = -1; break; }
3059 		}
3060 		if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
3061 			break;
3062 		fallthrough;
3063 	case ASB_DISCARD_LEAST_CHG:
3064 		if	(ch_self < ch_peer)
3065 			rv = -1;
3066 		else if (ch_self > ch_peer)
3067 			rv =  1;
3068 		else /* ( ch_self == ch_peer ) */
3069 		     /* Well, then use something else. */
3070 			rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
3071 				? -1 : 1;
3072 		break;
3073 	case ASB_DISCARD_LOCAL:
3074 		rv = -1;
3075 		break;
3076 	case ASB_DISCARD_REMOTE:
3077 		rv =  1;
3078 	}
3079 
3080 	return rv;
3081 }
3082 
3083 /*
3084  * drbd_asb_recover_1p  -  Recover after split-brain with one remaining primary
3085  */
3086 static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local)
3087 {
3088 	struct drbd_device *device = peer_device->device;
3089 	int hg, rv = -100;
3090 	enum drbd_after_sb_p after_sb_1p;
3091 
3092 	rcu_read_lock();
3093 	after_sb_1p = rcu_dereference(peer_device->connection->net_conf)->after_sb_1p;
3094 	rcu_read_unlock();
3095 	switch (after_sb_1p) {
3096 	case ASB_DISCARD_YOUNGER_PRI:
3097 	case ASB_DISCARD_OLDER_PRI:
3098 	case ASB_DISCARD_LEAST_CHG:
3099 	case ASB_DISCARD_LOCAL:
3100 	case ASB_DISCARD_REMOTE:
3101 	case ASB_DISCARD_ZERO_CHG:
3102 		drbd_err(device, "Configuration error.\n");
3103 		break;
3104 	case ASB_DISCONNECT:
3105 		break;
3106 	case ASB_CONSENSUS:
3107 		hg = drbd_asb_recover_0p(peer_device);
3108 		if (hg == -1 && device->state.role == R_SECONDARY)
3109 			rv = hg;
3110 		if (hg == 1  && device->state.role == R_PRIMARY)
3111 			rv = hg;
3112 		break;
3113 	case ASB_VIOLENTLY:
3114 		rv = drbd_asb_recover_0p(peer_device);
3115 		break;
3116 	case ASB_DISCARD_SECONDARY:
3117 		return device->state.role == R_PRIMARY ? 1 : -1;
3118 	case ASB_CALL_HELPER:
3119 		hg = drbd_asb_recover_0p(peer_device);
3120 		if (hg == -1 && device->state.role == R_PRIMARY) {
3121 			enum drbd_state_rv rv2;
3122 
3123 			 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
3124 			  * we might be here in C_WF_REPORT_PARAMS which is transient.
3125 			  * we do not need to wait for the after state change work either. */
3126 			rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
3127 			if (rv2 != SS_SUCCESS) {
3128 				drbd_khelper(device, "pri-lost-after-sb");
3129 			} else {
3130 				drbd_warn(device, "Successfully gave up primary role.\n");
3131 				rv = hg;
3132 			}
3133 		} else
3134 			rv = hg;
3135 	}
3136 
3137 	return rv;
3138 }
3139 
3140 /*
3141  * drbd_asb_recover_2p  -  Recover after split-brain with two remaining primaries
3142  */
3143 static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local)
3144 {
3145 	struct drbd_device *device = peer_device->device;
3146 	int hg, rv = -100;
3147 	enum drbd_after_sb_p after_sb_2p;
3148 
3149 	rcu_read_lock();
3150 	after_sb_2p = rcu_dereference(peer_device->connection->net_conf)->after_sb_2p;
3151 	rcu_read_unlock();
3152 	switch (after_sb_2p) {
3153 	case ASB_DISCARD_YOUNGER_PRI:
3154 	case ASB_DISCARD_OLDER_PRI:
3155 	case ASB_DISCARD_LEAST_CHG:
3156 	case ASB_DISCARD_LOCAL:
3157 	case ASB_DISCARD_REMOTE:
3158 	case ASB_CONSENSUS:
3159 	case ASB_DISCARD_SECONDARY:
3160 	case ASB_DISCARD_ZERO_CHG:
3161 		drbd_err(device, "Configuration error.\n");
3162 		break;
3163 	case ASB_VIOLENTLY:
3164 		rv = drbd_asb_recover_0p(peer_device);
3165 		break;
3166 	case ASB_DISCONNECT:
3167 		break;
3168 	case ASB_CALL_HELPER:
3169 		hg = drbd_asb_recover_0p(peer_device);
3170 		if (hg == -1) {
3171 			enum drbd_state_rv rv2;
3172 
3173 			 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
3174 			  * we might be here in C_WF_REPORT_PARAMS which is transient.
3175 			  * we do not need to wait for the after state change work either. */
3176 			rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
3177 			if (rv2 != SS_SUCCESS) {
3178 				drbd_khelper(device, "pri-lost-after-sb");
3179 			} else {
3180 				drbd_warn(device, "Successfully gave up primary role.\n");
3181 				rv = hg;
3182 			}
3183 		} else
3184 			rv = hg;
3185 	}
3186 
3187 	return rv;
3188 }
3189 
3190 static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
3191 			   u64 bits, u64 flags)
3192 {
3193 	if (!uuid) {
3194 		drbd_info(device, "%s uuid info vanished while I was looking!\n", text);
3195 		return;
3196 	}
3197 	drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
3198 	     text,
3199 	     (unsigned long long)uuid[UI_CURRENT],
3200 	     (unsigned long long)uuid[UI_BITMAP],
3201 	     (unsigned long long)uuid[UI_HISTORY_START],
3202 	     (unsigned long long)uuid[UI_HISTORY_END],
3203 	     (unsigned long long)bits,
3204 	     (unsigned long long)flags);
3205 }
3206 
3207 /*
3208   100	after split brain try auto recover
3209     2	C_SYNC_SOURCE set BitMap
3210     1	C_SYNC_SOURCE use BitMap
3211     0	no Sync
3212    -1	C_SYNC_TARGET use BitMap
3213    -2	C_SYNC_TARGET set BitMap
3214  -100	after split brain, disconnect
3215 -1000	unrelated data
3216 -1091   requires proto 91
3217 -1096   requires proto 96
3218  */
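/* Example (illustration only): on a freshly created pair of devices both
 * current UUIDs are UUID_JUST_CREATED, rule 10 matches, and the result 0
 * means "no sync necessary".  If only the local side was just created,
 * rule 20 returns -2: become C_SYNC_TARGET and set the whole bitmap
 * (full sync). */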
3219 
3220 static int drbd_uuid_compare(struct drbd_device *const device, enum drbd_role const peer_role, int *rule_nr) __must_hold(local)
3221 {
3222 	struct drbd_peer_device *const peer_device = first_peer_device(device);
3223 	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
3224 	u64 self, peer;
3225 	int i, j;
3226 
3227 	self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
3228 	peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
3229 
3230 	*rule_nr = 10;
3231 	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
3232 		return 0;
3233 
3234 	*rule_nr = 20;
3235 	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
3236 	     peer != UUID_JUST_CREATED)
3237 		return -2;
3238 
3239 	*rule_nr = 30;
3240 	if (self != UUID_JUST_CREATED &&
3241 	    (peer == UUID_JUST_CREATED || peer == (u64)0))
3242 		return 2;
3243 
3244 	if (self == peer) {
3245 		int rct, dc; /* roles at crash time */
3246 
3247 		if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
3248 
3249 			if (connection->agreed_pro_version < 91)
3250 				return -1091;
3251 
3252 			if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
3253 			    (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
3254 				drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n");
3255 				drbd_uuid_move_history(device);
3256 				device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3257 				device->ldev->md.uuid[UI_BITMAP] = 0;
3258 
3259 				drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3260 					       device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
3261 				*rule_nr = 34;
3262 			} else {
3263 				drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n");
3264 				*rule_nr = 36;
3265 			}
3266 
3267 			return 1;
3268 		}
3269 
3270 		if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
3271 
3272 			if (connection->agreed_pro_version < 91)
3273 				return -1091;
3274 
3275 			if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
3276 			    (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
3277 				drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
3278 
3279 				device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
3280 				device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
3281 				device->p_uuid[UI_BITMAP] = 0UL;
3282 
3283 				drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3284 				*rule_nr = 35;
3285 			} else {
3286 				drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n");
3287 				*rule_nr = 37;
3288 			}
3289 
3290 			return -1;
3291 		}
3292 
3293 		/* Common power [off|failure] */
3294 		rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) +
3295 			(device->p_uuid[UI_FLAGS] & 2);
3296 		/* lowest bit is set when we were primary,
3297 		 * next bit (weight 2) is set when peer was primary */
3298 		*rule_nr = 40;
3299 
3300 		/* Neither has the "crashed primary" flag set,
3301 		 * only a replication link hickup. */
3302 		if (rct == 0)
3303 			return 0;
3304 
3305 		/* Current UUID equal and no bitmap uuid; does not necessarily
3306 		 * mean this was a "simultaneous hard crash", maybe IO was
3307 		 * frozen, so no UUID-bump happened.
3308 		 * This is a protocol change, overload DRBD_FF_WSAME as flag
3309 		 * for "new-enough" peer DRBD version. */
3310 		if (device->state.role == R_PRIMARY || peer_role == R_PRIMARY) {
3311 			*rule_nr = 41;
3312 			if (!(connection->agreed_features & DRBD_FF_WSAME)) {
3313 				drbd_warn(peer_device, "Equivalent unrotated UUIDs, but current primary present.\n");
3314 				return -(0x10000 | PRO_VERSION_MAX | (DRBD_FF_WSAME << 8));
3315 			}
3316 			if (device->state.role == R_PRIMARY && peer_role == R_PRIMARY) {
3317 				/* At least one has the "crashed primary" bit set,
3318 				 * both are primary now, but neither has rotated its UUIDs?
3319 				 * "Can not happen." */
3320 				drbd_err(peer_device, "Equivalent unrotated UUIDs, but both are primary. Can not resolve this.\n");
3321 				return -100;
3322 			}
3323 			if (device->state.role == R_PRIMARY)
3324 				return 1;
3325 			return -1;
3326 		}
3327 
3328 		/* Both are secondary.
3329 		 * Really looks like recovery from simultaneous hard crash.
3330 		 * Check which had been primary before, and arbitrate. */
3331 		switch (rct) {
3332 		case 0: /* !self_pri && !peer_pri */ return 0; /* already handled */
3333 		case 1: /*  self_pri && !peer_pri */ return 1;
3334 		case 2: /* !self_pri &&  peer_pri */ return -1;
3335 		case 3: /*  self_pri &&  peer_pri */
3336 			dc = test_bit(RESOLVE_CONFLICTS, &connection->flags);
3337 			return dc ? -1 : 1;
3338 		}
3339 	}
3340 
3341 	*rule_nr = 50;
3342 	peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
3343 	if (self == peer)
3344 		return -1;
3345 
3346 	*rule_nr = 51;
3347 	peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
3348 	if (self == peer) {
3349 		if (connection->agreed_pro_version < 96 ?
3350 		    (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
3351 		    (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
3352 		    peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
3353 			/* The last P_SYNC_UUID did not get through. Undo the modifications
3354 			   made to the peer's UUIDs at the last start of a resync as sync source. */
3355 
3356 			if (connection->agreed_pro_version < 91)
3357 				return -1091;
3358 
3359 			device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
3360 			device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];
3361 
3362 			drbd_info(device, "Lost last syncUUID packet, corrected:\n");
3363 			drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3364 
3365 			return -1;
3366 		}
3367 	}
3368 
3369 	*rule_nr = 60;
3370 	self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
3371 	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
3372 		peer = device->p_uuid[i] & ~((u64)1);
3373 		if (self == peer)
3374 			return -2;
3375 	}
3376 
3377 	*rule_nr = 70;
3378 	self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3379 	peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
3380 	if (self == peer)
3381 		return 1;
3382 
3383 	*rule_nr = 71;
3384 	self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
3385 	if (self == peer) {
3386 		if (connection->agreed_pro_version < 96 ?
3387 		    (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
3388 		    (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
3389 		    self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
3390 			/* The last P_SYNC_UUID did not get through. Undo our own
3391 			   sync-source UUID modifications from the last resync start. */
3392 
3393 			if (connection->agreed_pro_version < 91)
3394 				return -1091;
3395 
3396 			__drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
3397 			__drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
3398 
3399 			drbd_info(device, "Last syncUUID did not get through, corrected:\n");
3400 			drbd_uuid_dump(device, "self", device->ldev->md.uuid,
3401 				       device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
3402 
3403 			return 1;
3404 		}
3405 	}
3406 
3407 
3408 	*rule_nr = 80;
3409 	peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
3410 	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
3411 		self = device->ldev->md.uuid[i] & ~((u64)1);
3412 		if (self == peer)
3413 			return 2;
3414 	}
3415 
3416 	*rule_nr = 90;
3417 	self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
3418 	peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
3419 	if (self == peer && self != ((u64)0))
3420 		return 100;
3421 
3422 	*rule_nr = 100;
3423 	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
3424 		self = device->ldev->md.uuid[i] & ~((u64)1);
3425 		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
3426 			peer = device->p_uuid[j] & ~((u64)1);
3427 			if (self == peer)
3428 				return -100;
3429 		}
3430 	}
3431 
3432 	return -1000;
3433 }
3434 
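/* Return-value encoding of drbd_uuid_compare() ("hg"), as interpreted by
 * drbd_sync_handshake() below (a summary derived from the checks in this
 * file, not an authoritative spec):
 *    0        no resync needed
 *    1 / -1   become SyncSource / SyncTarget, bitmap-based resync
 *    2 / -2   become SyncSource / SyncTarget, full resync
 *  100 / -100 split brain, to be resolved by policy or helper
 *  -1000      unrelated data
 *  -(1000 + proto)
 *             peer must support at least protocol 'proto'
 *             (e.g. -1091 means protocol 91)
 *  -(0x10000 | (features << 8) | proto)
 *             peer must also support the given feature flags
 *             (see the DRBD_FF_WSAME case above)
 */
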
3435 /* drbd_sync_handshake() returns the new conn state on success, or
3436    C_MASK (-1) on failure.
3437  */
3438 static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
3439 					   enum drbd_role peer_role,
3440 					   enum drbd_disk_state peer_disk) __must_hold(local)
3441 {
3442 	struct drbd_device *device = peer_device->device;
3443 	enum drbd_conns rv = C_MASK;
3444 	enum drbd_disk_state mydisk;
3445 	struct net_conf *nc;
3446 	int hg, rule_nr, rr_conflict, tentative, always_asbp;
3447 
3448 	mydisk = device->state.disk;
3449 	if (mydisk == D_NEGOTIATING)
3450 		mydisk = device->new_state_tmp.disk;
3451 
3452 	drbd_info(device, "drbd_sync_handshake:\n");
3453 
3454 	spin_lock_irq(&device->ldev->md.uuid_lock);
3455 	drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
3456 	drbd_uuid_dump(device, "peer", device->p_uuid,
3457 		       device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
3458 
3459 	hg = drbd_uuid_compare(device, peer_role, &rule_nr);
3460 	spin_unlock_irq(&device->ldev->md.uuid_lock);
3461 
3462 	drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
3463 
3464 	if (hg == -1000) {
3465 		drbd_alert(device, "Unrelated data, aborting!\n");
3466 		return C_MASK;
3467 	}
3468 	if (hg < -0x10000) {
3469 		int proto, fflags;
3470 		hg = -hg;
3471 		proto = hg & 0xff;
3472 		fflags = (hg >> 8) & 0xff;
3473 		drbd_alert(device, "To resolve this both sides have to support at least protocol %d and feature flags 0x%x\n",
3474 					proto, fflags);
3475 		return C_MASK;
3476 	}
3477 	if (hg < -1000) {
3478 		drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
3479 		return C_MASK;
3480 	}
3481 
3482 	if    ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
3483 	    (peer_disk == D_INCONSISTENT && mydisk    > D_INCONSISTENT)) {
3484 		int f = (hg == -100) || abs(hg) == 2;
3485 		hg = mydisk > D_INCONSISTENT ? 1 : -1;
3486 		if (f)
3487 			hg = hg*2;
3488 		drbd_info(device, "Becoming sync %s due to disk states.\n",
3489 		     hg > 0 ? "source" : "target");
3490 	}
3491 
3492 	if (abs(hg) == 100)
3493 		drbd_khelper(device, "initial-split-brain");
3494 
3495 	rcu_read_lock();
3496 	nc = rcu_dereference(peer_device->connection->net_conf);
3497 	always_asbp = nc->always_asbp;
3498 	rr_conflict = nc->rr_conflict;
3499 	tentative = nc->tentative;
3500 	rcu_read_unlock();
3501 
3502 	if (hg == 100 || (hg == -100 && always_asbp)) {
3503 		int pcount = (device->state.role == R_PRIMARY)
3504 			   + (peer_role == R_PRIMARY);
3505 		int forced = (hg == -100);
3506 
3507 		switch (pcount) {
3508 		case 0:
3509 			hg = drbd_asb_recover_0p(peer_device);
3510 			break;
3511 		case 1:
3512 			hg = drbd_asb_recover_1p(peer_device);
3513 			break;
3514 		case 2:
3515 			hg = drbd_asb_recover_2p(peer_device);
3516 			break;
3517 		}
3518 		if (abs(hg) < 100) {
3519 			drbd_warn(device, "Split-Brain detected, %d primaries, "
3520 			     "automatically solved. Sync from %s node\n",
3521 			     pcount, (hg < 0) ? "peer" : "this");
3522 			if (forced) {
3523 				drbd_warn(device, "Doing a full sync, since"
3524 				     " UUIDs were ambiguous.\n");
3525 				hg = hg*2;
3526 			}
3527 		}
3528 	}
3529 
3530 	if (hg == -100) {
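		/* Bit 0 of p_uuid[UI_FLAGS] carries the peer's discard-my-data
		 * setting (inferred from how it is paired with our own
		 * DISCARD_MY_DATA flag below): manual split-brain resolution
		 * only works if exactly one side has it set. */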
3531 		if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1))
3532 			hg = -1;
3533 		if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1))
3534 			hg = 1;
3535 
3536 		if (abs(hg) < 100)
3537 			drbd_warn(device, "Split-Brain detected, manually solved. "
3538 			     "Sync from %s node\n",
3539 			     (hg < 0) ? "peer" : "this");
3540 	}
3541 
3542 	if (hg == -100) {
3543 		/* FIXME this log message is not correct if we end up here
3544 		 * after an attempted attach on a diskless node.
3545 		 * We just refuse to attach -- well, we drop the "connection"
3546 		 * to that disk, in a way... */
3547 		drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n");
3548 		drbd_khelper(device, "split-brain");
3549 		return C_MASK;
3550 	}
3551 
3552 	if (hg > 0 && mydisk <= D_INCONSISTENT) {
3553 		drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n");
3554 		return C_MASK;
3555 	}
3556 
3557 	if (hg < 0 && /* by intention we do not use mydisk here. */
3558 	    device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) {
3559 		switch (rr_conflict) {
3560 		case ASB_CALL_HELPER:
3561 			drbd_khelper(device, "pri-lost");
3562 			fallthrough;
3563 		case ASB_DISCONNECT:
3564 			drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
3565 			return C_MASK;
3566 		case ASB_VIOLENTLY:
3567 			drbd_warn(device, "Becoming SyncTarget, violating the stable-data"
3568 			     " assumption\n");
3569 		}
3570 	}
3571 
3572 	if (tentative || test_bit(CONN_DRY_RUN, &peer_device->connection->flags)) {
3573 		if (hg == 0)
3574 			drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n");
3575 		else
3576 			drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.\n",
3577 				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3578 				 abs(hg) >= 2 ? "full" : "bit-map based");
3579 		return C_MASK;
3580 	}
3581 
3582 	if (abs(hg) >= 2) {
3583 		drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
3584 		if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3585 					BM_LOCKED_SET_ALLOWED))
3586 			return C_MASK;
3587 	}
3588 
3589 	if (hg > 0) { /* become sync source. */
3590 		rv = C_WF_BITMAP_S;
3591 	} else if (hg < 0) { /* become sync target */
3592 		rv = C_WF_BITMAP_T;
3593 	} else {
3594 		rv = C_CONNECTED;
3595 		if (drbd_bm_total_weight(device)) {
3596 			drbd_info(device, "No resync, but %lu bits in bitmap!\n",
3597 			     drbd_bm_total_weight(device));
3598 		}
3599 	}
3600 
3601 	return rv;
3602 }
3603 
3604 static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
3605 {
3606 	/* The peer's ASB_DISCARD_REMOTE is our ASB_DISCARD_LOCAL; that pairing is valid. */
3607 	if (peer == ASB_DISCARD_REMOTE)
3608 		return ASB_DISCARD_LOCAL;
3609 
3610 	/* Any other combination involving ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL is invalid. */
3611 	if (peer == ASB_DISCARD_LOCAL)
3612 		return ASB_DISCARD_REMOTE;
3613 
3614 	/* everything else is valid if they are equal on both sides. */
3615 	return peer;
3616 }
3617 
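/* For illustration: a peer configured with after-sb-0pri "discard-remote"
 * sends ASB_DISCARD_REMOTE, which from our point of view is
 * ASB_DISCARD_LOCAL.  receive_protocol() below compares the converted
 * value against our own after_sb_0p, so "discard-remote" on the peer
 * matches "discard-local" here, while identical discard-* settings on
 * both sides are rejected as incompatible. */
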
3618 static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi)
3619 {
3620 	struct p_protocol *p = pi->data;
3621 	enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3622 	int p_proto, p_discard_my_data, p_two_primaries, cf;
3623 	struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3624 	char integrity_alg[SHARED_SECRET_MAX] = "";
3625 	struct crypto_shash *peer_integrity_tfm = NULL;
3626 	void *int_dig_in = NULL, *int_dig_vv = NULL;
3627 
3628 	p_proto		= be32_to_cpu(p->protocol);
3629 	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
3630 	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
3631 	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
3632 	p_two_primaries = be32_to_cpu(p->two_primaries);
3633 	cf		= be32_to_cpu(p->conn_flags);
3634 	p_discard_my_data = cf & CF_DISCARD_MY_DATA;
3635 
3636 	if (connection->agreed_pro_version >= 87) {
3637 		int err;
3638 
3639 		if (pi->size > sizeof(integrity_alg))
3640 			return -EIO;
3641 		err = drbd_recv_all(connection, integrity_alg, pi->size);
3642 		if (err)
3643 			return err;
3644 		integrity_alg[SHARED_SECRET_MAX - 1] = 0;
3645 	}
3646 
3647 	if (pi->cmd != P_PROTOCOL_UPDATE) {
3648 		clear_bit(CONN_DRY_RUN, &connection->flags);
3649 
3650 		if (cf & CF_DRY_RUN)
3651 			set_bit(CONN_DRY_RUN, &connection->flags);
3652 
3653 		rcu_read_lock();
3654 		nc = rcu_dereference(connection->net_conf);
3655 
3656 		if (p_proto != nc->wire_protocol) {
3657 			drbd_err(connection, "incompatible %s settings\n", "protocol");
3658 			goto disconnect_rcu_unlock;
3659 		}
3660 
3661 		if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
3662 			drbd_err(connection, "incompatible %s settings\n", "after-sb-0pri");
3663 			goto disconnect_rcu_unlock;
3664 		}
3665 
3666 		if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
3667 			drbd_err(connection, "incompatible %s settings\n", "after-sb-1pri");
3668 			goto disconnect_rcu_unlock;
3669 		}
3670 
3671 		if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
3672 			drbd_err(connection, "incompatible %s settings\n", "after-sb-2pri");
3673 			goto disconnect_rcu_unlock;
3674 		}
3675 
3676 		if (p_discard_my_data && nc->discard_my_data) {
3677 			drbd_err(connection, "incompatible %s settings\n", "discard-my-data");
3678 			goto disconnect_rcu_unlock;
3679 		}
3680 
3681 		if (p_two_primaries != nc->two_primaries) {
3682 			drbd_err(connection, "incompatible %s settings\n", "allow-two-primaries");
3683 			goto disconnect_rcu_unlock;
3684 		}
3685 
3686 		if (strcmp(integrity_alg, nc->integrity_alg)) {
3687 			drbd_err(connection, "incompatible %s settings\n", "data-integrity-alg");
3688 			goto disconnect_rcu_unlock;
3689 		}
3690 
3691 		rcu_read_unlock();
3692 	}
3693 
3694 	if (integrity_alg[0]) {
3695 		int hash_size;
3696 
3697 		/*
3698 		 * We can only change the peer data integrity algorithm
3699 		 * here.  Changing our own data integrity algorithm
3700 		 * requires that we send a P_PROTOCOL_UPDATE packet at
3701 		 * the same time; otherwise, the peer has no way to
3702 		 * tell between which packets the algorithm should
3703 		 * change.
3704 		 */
3705 
3706 		peer_integrity_tfm = crypto_alloc_shash(integrity_alg, 0, 0);
3707 		if (IS_ERR(peer_integrity_tfm)) {
3708 			peer_integrity_tfm = NULL;
3709 			drbd_err(connection, "peer data-integrity-alg %s not supported\n",
3710 				 integrity_alg);
3711 			goto disconnect;
3712 		}
3713 
3714 		hash_size = crypto_shash_digestsize(peer_integrity_tfm);
3715 		int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3716 		int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3717 		if (!(int_dig_in && int_dig_vv)) {
3718 			drbd_err(connection, "Allocation of buffers for data integrity checking failed\n");
3719 			goto disconnect;
3720 		}
3721 	}
3722 
3723 	new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3724 	if (!new_net_conf)
3725 		goto disconnect;
3726 
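	/* Presumably data.mutex keeps the sender off the data socket while
	 * the wire protocol settings change, and conf_update serializes the
	 * net_conf update itself (an assumption based on how these locks are
	 * used elsewhere in this file). */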
3727 	mutex_lock(&connection->data.mutex);
3728 	mutex_lock(&connection->resource->conf_update);
3729 	old_net_conf = connection->net_conf;
3730 	*new_net_conf = *old_net_conf;
3731 
3732 	new_net_conf->wire_protocol = p_proto;
3733 	new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3734 	new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3735 	new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3736 	new_net_conf->two_primaries = p_two_primaries;
3737 
3738 	rcu_assign_pointer(connection->net_conf, new_net_conf);
3739 	mutex_unlock(&connection->resource->conf_update);
3740 	mutex_unlock(&connection->data.mutex);
3741 
3742 	crypto_free_shash(connection->peer_integrity_tfm);
3743 	kfree(connection->int_dig_in);
3744 	kfree(connection->int_dig_vv);
3745 	connection->peer_integrity_tfm = peer_integrity_tfm;
3746 	connection->int_dig_in = int_dig_in;
3747 	connection->int_dig_vv = int_dig_vv;
3748 
3749 	if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3750 		drbd_info(connection, "peer data-integrity-alg: %s\n",
3751 			  integrity_alg[0] ? integrity_alg : "(none)");
3752 
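	/* Wait for all RCU readers that may still see old_net_conf
	 * before freeing it. */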
3753 	synchronize_rcu();
3754 	kfree(old_net_conf);
3755 	return 0;
3756 
3757 disconnect_rcu_unlock:
3758 	rcu_read_unlock();
3759 disconnect:
3760 	crypto_free_shash(peer_integrity_tfm);
3761 	kfree(int_dig_in);
3762 	kfree(int_dig_vv);
3763 	conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
3764 	return -EIO;
3765 }
3766 
3767 /* helper function
3768  * input: alg name, feature name
3769  * return: NULL (alg name was "")
3770  *         ERR_PTR(error) if something goes wrong
3771  *         or the crypto hash ptr, if it worked out ok. */
3772 static struct crypto_shash *drbd_crypto_alloc_digest_safe(
3773 		const struct drbd_device *device,
3774 		const char *alg, const char *name)
3775 {
3776 	struct crypto_shash *tfm;
3777 
3778 	if (!alg[0])
3779 		return NULL;
3780 
3781 	tfm = crypto_alloc_shash(alg, 0, 0);
3782 	if (IS_ERR(tfm)) {
3783 		drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3784 			alg, name, PTR_ERR(tfm));
3785 		return tfm;
3786 	}
3787 	return tfm;
3788 }
3789 
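/* Read and discard the remaining pi->size bytes of the current packet
 * from the data socket, so that the receive stream stays in sync. */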
3790 static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi)
3791 {
3792 	void *buffer = connection->data.rbuf;
3793 	int size = pi->size;
3794 
3795 	while (size) {
3796 		int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3797 		s = drbd_recv(connection, buffer, s);
3798 		if (s <= 0) {
3799 			if (s < 0)
3800 				return s;
3801 			break;
3802 		}
3803 		size -= s;
3804 	}
3805 	if (size)
3806 		return -EIO;
3807 	return 0;
3808 }
3809 
3810 /*
3811  * config_unknown_volume  -  device configuration command for unknown volume
3812  *
3813  * When a device is added to an existing connection, the node on which the
3814  * device is added first will send configuration commands to its peer but the
3815  * peer will not know about the device yet.  It will warn and ignore these
3816  * commands.  Once the device is added on the second node, the second node will
3817  * send the same device configuration commands, but in the other direction.
3818  *
3819  * (We can also end up here if drbd is misconfigured.)
3820  */
3821 static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi)
3822 {
3823 	drbd_warn(connection, "%s packet received for volume %u, which is not configured locally\n",
3824 		  cmdname(pi->cmd), pi->vnr);
3825 	return ignore_remaining_packet(connection, pi);
3826 }
3827 
3828 static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi)
3829 {
3830 	struct drbd_peer_device *peer_device;
3831 	struct drbd_device *device;
3832 	struct p_rs_param_95 *p;
3833 	unsigned int header_size, data_size, exp_max_sz;
3834 	struct crypto_shash *verify_tfm = NULL;
3835 	struct crypto_shash *csums_tfm = NULL;
3836 	struct net_conf *old_net_conf, *new_net_conf = NULL;
3837 	struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
3838 	const int apv = connection->agreed_pro_version;
3839 	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
3840 	unsigned int fifo_size = 0;
3841 	int err;
3842 
3843 	peer_device = conn_peer_device(connection, pi->vnr);
3844 	if (!peer_device)
3845 		return config_unknown_volume(connection, pi);
3846 	device = peer_device->device;
3847 
3848 	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
3849 		    : apv == 88 ? sizeof(struct p_rs_param)
3850 					+ SHARED_SECRET_MAX
3851 		    : apv <= 94 ? sizeof(struct p_rs_param_89)
3852 		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);
3853 
3854 	if (pi->size > exp_max_sz) {
3855 		drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n",
3856 		    pi->size, exp_max_sz);
3857 		return -EIO;
3858 	}
3859 
3860 	if (apv <= 88) {
3861 		header_size = sizeof(struct p_rs_param);
3862 		data_size = pi->size - header_size;
3863 	} else if (apv <= 94) {
3864 		header_size = sizeof(struct p_rs_param_89);
3865 		data_size = pi->size - header_size;
3866 		D_ASSERT(device, data_size == 0);
3867 	} else {
3868 		header_size = sizeof(struct p_rs_param_95);
3869 		data_size = pi->size - header_size;
3870 		D_ASSERT(device, data_size == 0);
3871 	}
3872 
3873 	/* initialize verify_alg and csums_alg */
3874 	p = pi->data;
3875 	BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX);
3876 	memset(&p->algs, 0, sizeof(p->algs));
3877 
3878 	err = drbd_recv_all(peer_device->connection, p, header_size);
3879 	if (err)
3880 		return err;
3881 
3882 	mutex_lock(&connection->resource->conf_update);
3883 	old_net_conf = peer_device->connection->net_conf;
3884 	if (get_ldev(device)) {
3885 		new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3886 		if (!new_disk_conf) {
3887 			put_ldev(device);
3888 			mutex_unlock(&connection->resource->conf_update);
3889 			drbd_err(device, "Allocation of new disk_conf failed\n");
3890 			return -ENOMEM;
3891 		}
3892 
3893 		old_disk_conf = device->ldev->disk_conf;
3894 		*new_disk_conf = *old_disk_conf;
3895 
3896 		new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
3897 	}
3898 
3899 	if (apv >= 88) {
3900 		if (apv == 88) {
3901 			if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3902 				drbd_err(device, "verify-alg of wrong size, "
3903 					"peer wants %u, accepting only up to %u bytes\n",
3904 					data_size, SHARED_SECRET_MAX);
3905 				err = -EIO;
3906 				goto reconnect;
3907 			}
3908 
3909 			err = drbd_recv_all(peer_device->connection, p->verify_alg, data_size);
3910 			if (err)
3911 				goto reconnect;
3912 			/* we expect NUL terminated string */
3913 			/* but just in case someone tries to be evil */
3914 			D_ASSERT(device, p->verify_alg[data_size-1] == 0);
3915 			p->verify_alg[data_size-1] = 0;
3916 
3917 		} else /* apv >= 89 */ {
3918 			/* we still expect NUL terminated strings */
3919 			/* but just in case someone tries to be evil */
3920 			D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3921 			D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3922 			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3923 			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3924 		}
3925 
3926 		if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
3927 			if (device->state.conn == C_WF_REPORT_PARAMS) {
3928 				drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
3929 				    old_net_conf->verify_alg, p->verify_alg);
3930 				goto disconnect;
3931 			}
3932 			verify_tfm = drbd_crypto_alloc_digest_safe(device,
3933 					p->verify_alg, "verify-alg");
3934 			if (IS_ERR(verify_tfm)) {
3935 				verify_tfm = NULL;
3936 				goto disconnect;
3937 			}
3938 		}
3939 
3940 		if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
3941 			if (device->state.conn == C_WF_REPORT_PARAMS) {
3942 				drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
3943 				    old_net_conf->csums_alg, p->csums_alg);
3944 				goto disconnect;
3945 			}
3946 			csums_tfm = drbd_crypto_alloc_digest_safe(device,
3947 					p->csums_alg, "csums-alg");
3948 			if (IS_ERR(csums_tfm)) {
3949 				csums_tfm = NULL;
3950 				goto disconnect;
3951 			}
3952 		}
3953 
3954 		if (apv > 94 && new_disk_conf) {
3955 			new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3956 			new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3957 			new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3958 			new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
3959 
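			/* Size the resync-controller fifo to one slot per
			 * scheduling step of the plan-ahead window:
			 * c_plan_ahead is configured in tenths of a second,
			 * and SLEEP_TIME is assumed to be the resync
			 * scheduling interval in jiffies. */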
3960 			fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
3961 			if (fifo_size != device->rs_plan_s->size) {
3962 				new_plan = fifo_alloc(fifo_size);
3963 				if (!new_plan) {
3964 					drbd_err(device, "allocation of fifo_buffer failed\n");
3965 					put_ldev(device);
3966 					goto disconnect;
3967 				}
3968 			}
3969 		}
3970 
3971 		if (verify_tfm || csums_tfm) {
3972 			new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3973 			if (!new_net_conf)
3974 				goto disconnect;
3975 
3976 			*new_net_conf = *old_net_conf;
3977 
3978 			if (verify_tfm) {
3979 				strcpy(new_net_conf->verify_alg, p->verify_alg);
3980 				new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3981 				crypto_free_shash(peer_device->connection->verify_tfm);
3982 				peer_device->connection->verify_tfm = verify_tfm;
3983 				drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
3984 			}
3985 			if (csums_tfm) {
3986 				strcpy(new_net_conf->csums_alg, p->csums_alg);
3987 				new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3988 				crypto_free_shash(peer_device->connection->csums_tfm);
3989 				peer_device->connection->csums_tfm = csums_tfm;
3990 				drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
3991 			}
3992 			rcu_assign_pointer(connection->net_conf, new_net_conf);
3993 		}
3994 	}
3995 
3996 	if (new_disk_conf) {
3997 		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
3998 		put_ldev(device);
3999 	}
4000 
4001 	if (new_plan) {
4002 		old_plan = device->rs_plan_s;
4003 		rcu_assign_pointer(device->rs_plan_s, new_plan);
4004 	}
4005 
4006 	mutex_unlock(&connection->resource->conf_update);
4007 	synchronize_rcu();
4008 	if (new_net_conf)
4009 		kfree(old_net_conf);
4010 	kfree(old_disk_conf);
4011 	kfree(old_plan);
4012 
4013 	return 0;
4014 
4015 reconnect:
4016 	if (new_disk_conf) {
4017 		put_ldev(device);
4018 		kfree(new_disk_conf);
4019 	}
4020 	mutex_unlock(&connection->resource->conf_update);
4021 	return -EIO;
4022 
4023 disconnect:
4024 	kfree(new_plan);
4025 	if (new_disk_conf) {
4026 		put_ldev(device);
4027 		kfree(new_disk_conf);
4028 	}
4029 	mutex_unlock(&connection->resource->conf_update);
4030 	/* just for completeness: actually not needed,
4031 	 * as this is not reached if csums_tfm was ok. */
4032 	crypto_free_shash(csums_tfm);
4033 	/* but free the verify_tfm again, if csums_tfm did not work out */
4034 	crypto_free_shash(verify_tfm);
4035 	conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4036 	return -EIO;
4037 }
4038 
4039 /* warn if the arguments differ by more than 12.5% (of the smaller value) */
4040 static void warn_if_differ_considerably(struct drbd_device *device,
4041 	const char *s, sector_t a, sector_t b)
4042 {
4043 	sector_t d;
4044 	if (a == 0 || b == 0)
4045 		return;
4046 	d = (a > b) ? (a - b) : (b - a);
4047 	if (d > (a>>3) || d > (b>>3))
4048 		drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s,
4049 		     (unsigned long long)a, (unsigned long long)b);
4050 }
4051 
4052 static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi)
4053 {
4054 	struct drbd_peer_device *peer_device;
4055 	struct drbd_device *device;
4056 	struct p_sizes *p = pi->data;
4057 	struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL;
4058 	enum determine_dev_size dd = DS_UNCHANGED;
4059 	sector_t p_size, p_usize, p_csize, my_usize;
4060 	sector_t new_size, cur_size;
4061 	int ldsc = 0; /* local disk size changed */
4062 	enum dds_flags ddsf;
4063 
4064 	peer_device = conn_peer_device(connection, pi->vnr);
4065 	if (!peer_device)
4066 		return config_unknown_volume(connection, pi);
4067 	device = peer_device->device;
4068 	cur_size = get_capacity(device->vdisk);
4069 
4070 	p_size = be64_to_cpu(p->d_size);
4071 	p_usize = be64_to_cpu(p->u_size);
4072 	p_csize = be64_to_cpu(p->c_size);
4073 
4074 	/* just store the peer's disk size for now.
4075 	 * we still need to figure out whether we accept that. */
4076 	device->p_size = p_size;
4077 
4078 	if (get_ldev(device)) {
4079 		rcu_read_lock();
4080 		my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
4081 		rcu_read_unlock();
4082 
4083 		warn_if_differ_considerably(device, "lower level device sizes",
4084 			   p_size, drbd_get_max_capacity(device->ldev));
4085 		warn_if_differ_considerably(device, "user requested size",
4086 					    p_usize, my_usize);
4087 
4088 		/* if this is the first connect, or an otherwise expected
4089 		 * param exchange, choose the minimum */
4090 		if (device->state.conn == C_WF_REPORT_PARAMS)
4091 			p_usize = min_not_zero(my_usize, p_usize);
4092 
4093 		/* Never shrink a device with usable data during connect,
4094 		 * or "attach" on the peer.
4095 		 * But allow online shrinking if we are connected. */
4096 		new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0);
4097 		if (new_size < cur_size &&
4098 		    device->state.disk >= D_OUTDATED &&
4099 		    (device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS)) {
4100 			drbd_err(device, "The peer's disk size is too small! (%llu < %llu sectors)\n",
4101 					(unsigned long long)new_size, (unsigned long long)cur_size);
4102 			conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4103 			put_ldev(device);
4104 			return -EIO;
4105 		}
4106 
4107 		if (my_usize != p_usize) {
4108 			struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
4109 
4110 			new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
4111 			if (!new_disk_conf) {
4112 				put_ldev(device);
4113 				return -ENOMEM;
4114 			}
4115 
4116 			mutex_lock(&connection->resource->conf_update);
4117 			old_disk_conf = device->ldev->disk_conf;
4118 			*new_disk_conf = *old_disk_conf;
4119 			new_disk_conf->disk_size = p_usize;
4120 
4121 			rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
4122 			mutex_unlock(&connection->resource->conf_update);
4123 			synchronize_rcu();
4124 			kfree(old_disk_conf);
4125 
4126 			drbd_info(device, "Peer sets u_size to %lu sectors (old: %lu)\n",
4127 				 (unsigned long)p_usize, (unsigned long)my_usize);
4128 		}
4129 
4130 		put_ldev(device);
4131 	}
4132 
4133 	device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
4134 	/* Keep drbd_reconsider_queue_parameters() ahead of drbd_determine_dev_size().
4135 	   In case we cleared the QUEUE_FLAG_DISCARD from our queue in
4136 	   drbd_reconsider_queue_parameters(), we can be sure that after
4137 	   drbd_determine_dev_size() no REQ_DISCARDs are in the queue. */
4138 
4139 	ddsf = be16_to_cpu(p->dds_flags);
4140 	if (get_ldev(device)) {
4141 		drbd_reconsider_queue_parameters(device, device->ldev, o);
4142 		dd = drbd_determine_dev_size(device, ddsf, NULL);
4143 		put_ldev(device);
4144 		if (dd == DS_ERROR)
4145 			return -EIO;
4146 		drbd_md_sync(device);
4147 	} else {
4148 		/*
4149 		 * I am diskless, need to accept the peer's *current* size.
4150 		 * I must NOT accept the peers backing disk size,
4151 		 * it may have been larger than mine all along...
4152 		 *
4153 		 * At this point, the peer knows more about my disk, or at
4154 		 * least about what we last agreed upon, than myself.
4155 		 * So if his c_size is less than his d_size, the most likely
4156 		 * reason is that *my* d_size was smaller last time we checked.
4157 		 *
4158 		 * However, if he sends a zero current size,
4159 		 * take his (user-capped or) backing disk size anyways.
4160 		 *
4161 		 * Unless of course he does not have a disk himself.
4162 		 * In which case we ignore this completely.
4163 		 */
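		/* GNU "?:" picks the first nonzero value: the peer's current
		 * size, else its user-configured size, else its backing disk
		 * size. */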
4164 		sector_t new_size = p_csize ?: p_usize ?: p_size;
4165 		drbd_reconsider_queue_parameters(device, NULL, o);
4166 		if (new_size == 0) {
4167 			/* Ignore, the peer knows nothing. */
4168 		} else if (new_size == cur_size) {
4169 			/* nothing to do */
4170 		} else if (cur_size != 0 && p_size == 0) {
4171 			drbd_warn(device, "Ignored diskless peer device size (peer:%llu != me:%llu sectors)!\n",
4172 					(unsigned long long)new_size, (unsigned long long)cur_size);
4173 		} else if (new_size < cur_size && device->state.role == R_PRIMARY) {
4174 			drbd_err(device, "The peer's device size is too small! (%llu < %llu sectors); demote me first!\n",
4175 					(unsigned long long)new_size, (unsigned long long)cur_size);
4176 			conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4177 			return -EIO;
4178 		} else {
4179 			/* I believe the peer, if
4180 			 *  - I don't have a current size myself
4181 			 *  - we agree on the size anyways
4182 			 *  - I do have a current size, am Secondary,
4183 			 *    and he has the only disk
4184 			 *  - I do have a current size, am Primary,
4185 			 *    and he has the only disk,
4186 			 *    which is larger than my current size
4187 			 */
4188 			drbd_set_my_capacity(device, new_size);
4189 		}
4190 	}
4191 
4192 	if (get_ldev(device)) {
4193 		if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) {
4194 			device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
4195 			ldsc = 1;
4196 		}
4197 
4198 		put_ldev(device);
4199 	}
4200 
4201 	if (device->state.conn > C_WF_REPORT_PARAMS) {
4202 		if (be64_to_cpu(p->c_size) != get_capacity(device->vdisk) ||
4203 		    ldsc) {
4204 			/* we have different sizes, probably peer
4205 			 * needs to know my new size... */
4206 			drbd_send_sizes(peer_device, 0, ddsf);
4207 		}
4208 		if (test_and_clear_bit(RESIZE_PENDING, &device->flags) ||
4209 		    (dd == DS_GREW && device->state.conn == C_CONNECTED)) {
4210 			if (device->state.pdsk >= D_INCONSISTENT &&
4211 			    device->state.disk >= D_INCONSISTENT) {
4212 				if (ddsf & DDSF_NO_RESYNC)
4213 					drbd_info(device, "Resync of new storage suppressed with --assume-clean\n");
4214 				else
4215 					resync_after_online_grow(device);
4216 			} else
4217 				set_bit(RESYNC_AFTER_NEG, &device->flags);
4218 		}
4219 	}
4220 
4221 	return 0;
4222 }
4223 
4224 static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi)
4225 {
4226 	struct drbd_peer_device *peer_device;
4227 	struct drbd_device *device;
4228 	struct p_uuids *p = pi->data;
4229 	u64 *p_uuid;
4230 	int i, updated_uuids = 0;
4231 
4232 	peer_device = conn_peer_device(connection, pi->vnr);
4233 	if (!peer_device)
4234 		return config_unknown_volume(connection, pi);
4235 	device = peer_device->device;
4236 
4237 	p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO);
4238 	if (!p_uuid)
4239 		return -ENOMEM;
4240 
4241 	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
4242 		p_uuid[i] = be64_to_cpu(p->uuid[i]);
4243 
4244 	kfree(device->p_uuid);
4245 	device->p_uuid = p_uuid;
4246 
4247 	if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) &&
4248 	    device->state.disk < D_INCONSISTENT &&
4249 	    device->state.role == R_PRIMARY &&
4250 	    (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
4251 		drbd_err(device, "Can only connect to data with current UUID=%016llX\n",
4252 		    (unsigned long long)device->ed_uuid);
4253 		conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4254 		return -EIO;
4255 	}
4256 
4257 	if (get_ldev(device)) {
4258 		int skip_initial_sync =
4259 			device->state.conn == C_CONNECTED &&
4260 			peer_device->connection->agreed_pro_version >= 90 &&
4261 			device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
4262 			(p_uuid[UI_FLAGS] & 8);
4263 		if (skip_initial_sync) {
4264 			drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n");
4265 			drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
4266 					"clear_n_write from receive_uuids",
4267 					BM_LOCKED_TEST_ALLOWED);
4268 			_drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]);
4269 			_drbd_uuid_set(device, UI_BITMAP, 0);
4270 			_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
4271 					CS_VERBOSE, NULL);
4272 			drbd_md_sync(device);
4273 			updated_uuids = 1;
4274 		}
4275 		put_ldev(device);
4276 	} else if (device->state.disk < D_INCONSISTENT &&
4277 		   device->state.role == R_PRIMARY) {
4278 		/* I am a diskless primary, the peer just created a new current UUID
4279 		   for me. */
4280 		updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
4281 	}
4282 
4283 	/* Before we test the disk state, wait until a possibly ongoing
4284 	   cluster-wide state change has finished. That is important if we are
4285 	   primary and are detaching from our disk: we need to see the new
4286 	   disk state. The empty lock/unlock pair below just waits for that. */
4287 	mutex_lock(device->state_mutex);
4288 	mutex_unlock(device->state_mutex);
4289 	if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT)
4290 		updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
4291 
4292 	if (updated_uuids)
4293 		drbd_print_uuids(device, "receiver updated UUIDs to");
4294 
4295 	return 0;
4296 }
4297 
4298 /**
4299  * convert_state() - Converts the peer's view of the cluster state to our point of view
4300  * @ps:		The state as seen by the peer.
4301  */
4302 static union drbd_state convert_state(union drbd_state ps)
4303 {
4304 	union drbd_state ms;
4305 
4306 	static enum drbd_conns c_tab[] = {
4307 		[C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
4308 		[C_CONNECTED] = C_CONNECTED,
4309 
4310 		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
4311 		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
4312 		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
4313 		[C_VERIFY_S]       = C_VERIFY_T,
4314 		[C_MASK]   = C_MASK,
4315 	};
4316 
4317 	ms.i = ps.i;
4318 
4319 	ms.conn = c_tab[ps.conn];
4320 	ms.peer = ps.role;
4321 	ms.role = ps.peer;
4322 	ms.pdsk = ps.disk;
4323 	ms.disk = ps.pdsk;
4324 	ms.peer_isp = (ps.aftr_isp | ps.user_isp);
4325 
4326 	return ms;
4327 }
4328 
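/* For illustration: if the peer reports (role=Primary, peer=Secondary,
 * disk=UpToDate, pdsk=Inconsistent, conn=C_STARTING_SYNC_S), we read that
 * as (role=Secondary, peer=Primary, disk=Inconsistent, pdsk=UpToDate,
 * conn=C_STARTING_SYNC_T). */
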
4329 static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi)
4330 {
4331 	struct drbd_peer_device *peer_device;
4332 	struct drbd_device *device;
4333 	struct p_req_state *p = pi->data;
4334 	union drbd_state mask, val;
4335 	enum drbd_state_rv rv;
4336 
4337 	peer_device = conn_peer_device(connection, pi->vnr);
4338 	if (!peer_device)
4339 		return -EIO;
4340 	device = peer_device->device;
4341 
4342 	mask.i = be32_to_cpu(p->mask);
4343 	val.i = be32_to_cpu(p->val);
4344 
4345 	if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) &&
4346 	    mutex_is_locked(device->state_mutex)) {
4347 		drbd_send_sr_reply(peer_device, SS_CONCURRENT_ST_CHG);
4348 		return 0;
4349 	}
4350 
4351 	mask = convert_state(mask);
4352 	val = convert_state(val);
4353 
4354 	rv = drbd_change_state(device, CS_VERBOSE, mask, val);
4355 	drbd_send_sr_reply(peer_device, rv);
4356 
4357 	drbd_md_sync(device);
4358 
4359 	return 0;
4360 }
4361 
4362 static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi)
4363 {
4364 	struct p_req_state *p = pi->data;
4365 	union drbd_state mask, val;
4366 	enum drbd_state_rv rv;
4367 
4368 	mask.i = be32_to_cpu(p->mask);
4369 	val.i = be32_to_cpu(p->val);
4370 
4371 	if (test_bit(RESOLVE_CONFLICTS, &connection->flags) &&
4372 	    mutex_is_locked(&connection->cstate_mutex)) {
4373 		conn_send_sr_reply(connection, SS_CONCURRENT_ST_CHG);
4374 		return 0;
4375 	}
4376 
4377 	mask = convert_state(mask);
4378 	val = convert_state(val);
4379 
4380 	rv = conn_request_state(connection, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
4381 	conn_send_sr_reply(connection, rv);
4382 
4383 	return 0;
4384 }
4385 
4386 static int receive_state(struct drbd_connection *connection, struct packet_info *pi)
4387 {
4388 	struct drbd_peer_device *peer_device;
4389 	struct drbd_device *device;
4390 	struct p_state *p = pi->data;
4391 	union drbd_state os, ns, peer_state;
4392 	enum drbd_disk_state real_peer_disk;
4393 	enum chg_state_flags cs_flags;
4394 	int rv;
4395 
4396 	peer_device = conn_peer_device(connection, pi->vnr);
4397 	if (!peer_device)
4398 		return config_unknown_volume(connection, pi);
4399 	device = peer_device->device;
4400 
4401 	peer_state.i = be32_to_cpu(p->state);
4402 
4403 	real_peer_disk = peer_state.disk;
4404 	if (peer_state.disk == D_NEGOTIATING) {
4405 		real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
4406 		drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
4407 	}
4408 
4409 	spin_lock_irq(&device->resource->req_lock);
4410  retry:
4411 	os = ns = drbd_read_state(device);
4412 	spin_unlock_irq(&device->resource->req_lock);
4413 
4414 	/* If some other part of the code (ack_receiver thread, timeout)
4415 	 * already decided to close the connection again,
4416 	 * we must not "re-establish" it here. */
4417 	if (os.conn <= C_TEAR_DOWN)
4418 		return -ECONNRESET;
4419 
4420 	/* If this is the "end of sync" confirmation, usually the peer disk
4421 	 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
4422 	 * set) resync started in PausedSyncT, or if the timing of pause-/
4423 	 * unpause-sync events has been "just right", the peer disk may
4424 	 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
4425 	 */
4426 	if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
4427 	    real_peer_disk == D_UP_TO_DATE &&
4428 	    os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
4429 		/* If we are (becoming) SyncSource, but peer is still in sync
4430 		 * preparation, ignore its uptodate-ness to avoid flapping, it
4431 		 * will change to inconsistent once the peer reaches active
4432 		 * syncing states.
4433 		 * It may have changed syncer-paused flags, however, so we
4434 		 * cannot ignore this completely. */
4435 		if (peer_state.conn > C_CONNECTED &&
4436 		    peer_state.conn < C_SYNC_SOURCE)
4437 			real_peer_disk = D_INCONSISTENT;
4438 
4439 		/* if peer_state changes to connected at the same time,
4440 		 * it explicitly notifies us that it finished resync.
4441 		 * Maybe we should finish it up, too? */
4442 		else if (os.conn >= C_SYNC_SOURCE &&
4443 			 peer_state.conn == C_CONNECTED) {
4444 			if (drbd_bm_total_weight(device) <= device->rs_failed)
4445 				drbd_resync_finished(device);
4446 			return 0;
4447 		}
4448 	}
4449 
4450 	/* explicit verify finished notification, stop sector reached. */
4451 	if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
4452 	    peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
4453 		ov_out_of_sync_print(device);
4454 		drbd_resync_finished(device);
4455 		return 0;
4456 	}
4457 
4458 	/* peer says his disk is inconsistent, while we think it is uptodate,
4459 	 * and this happens while the peer still thinks we have a sync going on,
4460 	 * but we think we are already done with the sync.
4461 	 * We ignore this to avoid flapping pdsk.
4462 	 * This should not happen, if the peer is a recent version of drbd. */
4463 	if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
4464 	    os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
4465 		real_peer_disk = D_UP_TO_DATE;
4466 
4467 	if (ns.conn == C_WF_REPORT_PARAMS)
4468 		ns.conn = C_CONNECTED;
4469 
4470 	if (peer_state.conn == C_AHEAD)
4471 		ns.conn = C_BEHIND;
4472 
4473 	/* TODO:
4474 	 * if (primary and diskless and peer uuid != effective uuid)
4475 	 *     abort attach on peer;
4476 	 *
4477 	 * If this node does not have good data, was already connected, but
4478 	 * the peer did a late attach only now, trying to "negotiate" with me,
4479 	 * AND I am currently Primary, possibly frozen, with some specific
4480 	 * "effective" uuid, this should never be reached, really, because
4481 	 * we first send the uuids, then the current state.
4482 	 *
4483 	 * In this scenario, we already dropped the connection hard
4484 	 * when we received the unsuitable uuids (receive_uuids()).
4485 	 *
4486 	 * Should we want to change this, that is: not drop the connection in
4487 	 * receive_uuids() already, then we would need to add a branch here
4488 	 * that aborts the attach of "unsuitable uuids" on the peer in case
4489 	 * this node is currently Diskless Primary.
4490 	 */
4491 
4492 	if (device->p_uuid && peer_state.disk >= D_NEGOTIATING &&
4493 	    get_ldev_if_state(device, D_NEGOTIATING)) {
4494 		int cr; /* consider resync */
4495 
4496 		/* if we established a new connection */
4497 		cr  = (os.conn < C_CONNECTED);
4498 		/* if we had an established connection
4499 		 * and one of the nodes newly attaches a disk */
4500 		cr |= (os.conn == C_CONNECTED &&
4501 		       (peer_state.disk == D_NEGOTIATING ||
4502 			os.disk == D_NEGOTIATING));
4503 		/* if we have both been inconsistent, and the peer has been
4504 		 * forced to be UpToDate with --force */
4505 		cr |= test_bit(CONSIDER_RESYNC, &device->flags);
4506 		/* if we had been plain connected, and the admin requested to
4507 		 * start a sync by "invalidate" or "invalidate-remote" */
4508 		cr |= (os.conn == C_CONNECTED &&
4509 				(peer_state.conn >= C_STARTING_SYNC_S &&
4510 				 peer_state.conn <= C_WF_BITMAP_T));
4511 
4512 		if (cr)
4513 			ns.conn = drbd_sync_handshake(peer_device, peer_state.role, real_peer_disk);
4514 
4515 		put_ldev(device);
4516 		if (ns.conn == C_MASK) {
4517 			ns.conn = C_CONNECTED;
4518 			if (device->state.disk == D_NEGOTIATING) {
4519 				drbd_force_state(device, NS(disk, D_FAILED));
4520 			} else if (peer_state.disk == D_NEGOTIATING) {
4521 				drbd_err(device, "Disk attach process on the peer node was aborted.\n");
4522 				peer_state.disk = D_DISKLESS;
4523 				real_peer_disk = D_DISKLESS;
4524 			} else {
4525 				if (test_and_clear_bit(CONN_DRY_RUN, &peer_device->connection->flags))
4526 					return -EIO;
4527 				D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS);
4528 				conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4529 				return -EIO;
4530 			}
4531 		}
4532 	}
4533 
4534 	spin_lock_irq(&device->resource->req_lock);
4535 	if (os.i != drbd_read_state(device).i)
4536 		goto retry;
4537 	clear_bit(CONSIDER_RESYNC, &device->flags);
4538 	ns.peer = peer_state.role;
4539 	ns.pdsk = real_peer_disk;
4540 	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
4541 	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
4542 		ns.disk = device->new_state_tmp.disk;
4543 	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
4544 	if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
4545 	    test_bit(NEW_CUR_UUID, &device->flags)) {
4546 		/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
4547 		   for temporary network outages! */
4548 		spin_unlock_irq(&device->resource->req_lock);
4549 		drbd_err(device, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
4550 		tl_clear(peer_device->connection);
4551 		drbd_uuid_new_current(device);
4552 		clear_bit(NEW_CUR_UUID, &device->flags);
4553 		conn_request_state(peer_device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
4554 		return -EIO;
4555 	}
4556 	rv = _drbd_set_state(device, ns, cs_flags, NULL);
4557 	ns = drbd_read_state(device);
4558 	spin_unlock_irq(&device->resource->req_lock);
4559 
4560 	if (rv < SS_SUCCESS) {
4561 		conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
4562 		return -EIO;
4563 	}
4564 
4565 	if (os.conn > C_WF_REPORT_PARAMS) {
4566 		if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
4567 		    peer_state.disk != D_NEGOTIATING) {
4568 			/* we want resync, peer has not yet decided to sync... */
4569 			/* Nowadays only used when forcing a node into primary role and
4570 			   setting its disk to UpToDate with that */
4571 			drbd_send_uuids(peer_device);
4572 			drbd_send_current_state(peer_device);
4573 		}
4574 	}
4575 
4576 	clear_bit(DISCARD_MY_DATA, &device->flags);
4577 
4578 	drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */
4579 
4580 	return 0;
4581 }
4582 
4583 static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi)
4584 {
4585 	struct drbd_peer_device *peer_device;
4586 	struct drbd_device *device;
4587 	struct p_rs_uuid *p = pi->data;
4588 
4589 	peer_device = conn_peer_device(connection, pi->vnr);
4590 	if (!peer_device)
4591 		return -EIO;
4592 	device = peer_device->device;
4593 
4594 	wait_event(device->misc_wait,
4595 		   device->state.conn == C_WF_SYNC_UUID ||
4596 		   device->state.conn == C_BEHIND ||
4597 		   device->state.conn < C_CONNECTED ||
4598 		   device->state.disk < D_NEGOTIATING);
4599 
4600 	/* D_ASSERT(device,  device->state.conn == C_WF_SYNC_UUID ); */
4601 
4602 	/* Here the _drbd_uuid_ functions are right, current should
4603 	   _not_ be rotated into the history */
4604 	if (get_ldev_if_state(device, D_NEGOTIATING)) {
4605 		_drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid));
4606 		_drbd_uuid_set(device, UI_BITMAP, 0UL);
4607 
4608 		drbd_print_uuids(device, "updated sync uuid");
4609 		drbd_start_resync(device, C_SYNC_TARGET);
4610 
4611 		put_ldev(device);
4612 	} else
4613 		drbd_err(device, "Ignoring SyncUUID packet!\n");
4614 
4615 	return 0;
4616 }
4617 
4618 /*
4619  * receive_bitmap_plain
4620  *
4621  * Return 0 when done, 1 when another iteration is needed, and a negative error
4622  * code upon failure.
4623  */
4624 static int
4625 receive_bitmap_plain(struct drbd_peer_device *peer_device, unsigned int size,
4626 		     unsigned long *p, struct bm_xfer_ctx *c)
4627 {
4628 	unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4629 				 drbd_header_size(peer_device->connection);
4630 	unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
4631 				       c->bm_words - c->word_offset);
4632 	unsigned int want = num_words * sizeof(*p);
4633 	int err;
4634 
4635 	if (want != size) {
4636 		drbd_err(peer_device, "%s:want (%u) != size (%u)\n", __func__, want, size);
4637 		return -EIO;
4638 	}
4639 	if (want == 0)
4640 		return 0;
4641 	err = drbd_recv_all(peer_device->connection, p, want);
4642 	if (err)
4643 		return err;
4644 
4645 	drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p);
4646 
4647 	c->word_offset += num_words;
4648 	c->bit_offset = c->word_offset * BITS_PER_LONG;
4649 	if (c->bit_offset > c->bm_bits)
4650 		c->bit_offset = c->bm_bits;
4651 
4652 	return 1;
4653 }
4654 
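/* Layout of p_compressed_bm.encoding, as implied by the accessors below:
 * bit 7 is the initial toggle (dcbp_get_start), bits 6:4 are the number
 * of trailing pad bits (dcbp_get_pad_bits), and bits 3:0 select the
 * encoding (enum drbd_bitmap_code, dcbp_get_code). */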
4655 static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4656 {
4657 	return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4658 }
4659 
4660 static int dcbp_get_start(struct p_compressed_bm *p)
4661 {
4662 	return (p->encoding & 0x80) != 0;
4663 }
4664 
4665 static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4666 {
4667 	return (p->encoding >> 4) & 0x7;
4668 }
4669 
4670 /*
4671  * recv_bm_rle_bits
4672  *
4673  * Return 0 when done, 1 when another iteration is needed, and a negative error
4674  * code upon failure.
4675  */
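/* Rough sketch of the decoding: run lengths alternate between runs of
 * clear and set bits, starting with dcbp_get_start(p).  E.g. with start=0
 * and decoded run lengths 5, 3, 10, the bits at offsets s..s+4 stay
 * clear, s+5..s+7 are set via _drbd_bm_set_bits(), and s+8..s+17 stay
 * clear again. */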
4676 static int
4677 recv_bm_rle_bits(struct drbd_peer_device *peer_device,
4678 		struct p_compressed_bm *p,
4679 		 struct bm_xfer_ctx *c,
4680 		 unsigned int len)
4681 {
4682 	struct bitstream bs;
4683 	u64 look_ahead;
4684 	u64 rl;
4685 	u64 tmp;
4686 	unsigned long s = c->bit_offset;
4687 	unsigned long e;
4688 	int toggle = dcbp_get_start(p);
4689 	int have;
4690 	int bits;
4691 
4692 	bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
4693 
4694 	bits = bitstream_get_bits(&bs, &look_ahead, 64);
4695 	if (bits < 0)
4696 		return -EIO;
4697 
4698 	for (have = bits; have > 0; s += rl, toggle = !toggle) {
4699 		bits = vli_decode_bits(&rl, look_ahead);
4700 		if (bits <= 0)
4701 			return -EIO;
4702 
4703 		if (toggle) {
4704 			e = s + rl - 1;
4705 			if (e >= c->bm_bits) {
4706 				drbd_err(peer_device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
4707 				return -EIO;
4708 			}
4709 			_drbd_bm_set_bits(peer_device->device, s, e);
4710 		}
4711 
4712 		if (have < bits) {
4713 			drbd_err(peer_device, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4714 				have, bits, look_ahead,
4715 				(unsigned int)(bs.cur.b - p->code),
4716 				(unsigned int)bs.buf_len);
4717 			return -EIO;
4718 		}
4719 		/* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
4720 		if (likely(bits < 64))
4721 			look_ahead >>= bits;
4722 		else
4723 			look_ahead = 0;
4724 		have -= bits;
4725 
4726 		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4727 		if (bits < 0)
4728 			return -EIO;
4729 		look_ahead |= tmp << have;
4730 		have += bits;
4731 	}
4732 
4733 	c->bit_offset = s;
4734 	bm_xfer_ctx_bit_to_word_offset(c);
4735 
4736 	return (s != c->bm_bits);
4737 }
4738 
4739 /*
4740  * decode_bitmap_c
4741  *
4742  * Return 0 when done, 1 when another iteration is needed, and a negative error
4743  * code upon failure.
4744  */
4745 static int
4746 decode_bitmap_c(struct drbd_peer_device *peer_device,
4747 		struct p_compressed_bm *p,
4748 		struct bm_xfer_ctx *c,
4749 		unsigned int len)
4750 {
4751 	if (dcbp_get_code(p) == RLE_VLI_Bits)
4752 		return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p));
4753 
4754 	/* other variants had been implemented for evaluation,
4755 	 * but have been dropped as this one turned out to be "best"
4756 	 * during all our tests. */
4757 
4758 	drbd_err(peer_device, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4759 	conn_request_state(peer_device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4760 	return -EIO;
4761 }
4762 
4763 void INFO_bm_xfer_stats(struct drbd_device *device,
4764 		const char *direction, struct bm_xfer_ctx *c)
4765 {
4766 	/* what would it take to transfer it "plaintext" */
4767 	unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
4768 	unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4769 	unsigned int plain =
4770 		header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4771 		c->bm_words * sizeof(unsigned long);
4772 	unsigned int total = c->bytes[0] + c->bytes[1];
4773 	unsigned int r;
4774 
4775 	/* total can not be zero. but just in case: */
4776 	if (total == 0)
4777 		return;
4778 
4779 	/* don't report if not compressed */
4780 	if (total >= plain)
4781 		return;
4782 
4783 	/* total < plain. check for overflow, still */
4784 	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4785 		                    : (1000 * total / plain);
4786 
4787 	if (r > 1000)
4788 		r = 1000;
4789 
4790 	r = 1000 - r;
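	/* e.g. plain = 4096 and total = 512 gives r = 1000 - 125 = 875,
	 * printed as "compression: 87.5%". */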
4791 	drbd_info(device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4792 	     "total %u; compression: %u.%u%%\n",
4793 			direction,
4794 			c->bytes[1], c->packets[1],
4795 			c->bytes[0], c->packets[0],
4796 			total, r/10, r % 10);
4797 }
4798 
4799 /* Since we process the bitfield from lower addresses to higher, it does
4800    not matter whether we do so in 32 bit or 64 bit chunks, as long as it
4801    is little endian. (Understand it as a byte stream, beginning with the
4802    lowest byte...) If we used big endian, we would need to process it
4803    from the highest address to the lowest in order to be agnostic to the
4804    32 vs 64 bit issue.
4805 
4806    Returns 0 on success, or a negative error code on failure. */
4807 static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi)
4808 {
4809 	struct drbd_peer_device *peer_device;
4810 	struct drbd_device *device;
4811 	struct bm_xfer_ctx c;
4812 	int err;
4813 
4814 	peer_device = conn_peer_device(connection, pi->vnr);
4815 	if (!peer_device)
4816 		return -EIO;
4817 	device = peer_device->device;
4818 
4819 	drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4820 	/* you are supposed to send additional out-of-sync information
4821 	 * if you actually set bits during this phase */
4822 
4823 	c = (struct bm_xfer_ctx) {
4824 		.bm_bits = drbd_bm_bits(device),
4825 		.bm_words = drbd_bm_words(device),
4826 	};
4827 
4828 	for (;;) {
4829 		if (pi->cmd == P_BITMAP)
4830 			err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c);
4831 		else if (pi->cmd == P_COMPRESSED_BITMAP) {
4832 			/* MAYBE: sanity check that we speak proto >= 90,
4833 			 * and the feature is enabled! */
4834 			struct p_compressed_bm *p = pi->data;
4835 
4836 			if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) {
4837 				drbd_err(device, "ReportCBitmap packet too large\n");
4838 				err = -EIO;
4839 				goto out;
4840 			}
4841 			if (pi->size <= sizeof(*p)) {
4842 				drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size);
4843 				err = -EIO;
4844 				goto out;
4845 			}
4846 			err = drbd_recv_all(peer_device->connection, p, pi->size);
4847 			if (err)
4848 				goto out;
4849 			err = decode_bitmap_c(peer_device, p, &c, pi->size);
4850 		} else {
4851 			drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", pi->cmd);
4852 			err = -EIO;
4853 			goto out;
4854 		}
4855 
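		/* Index 1 accounts plain P_BITMAP packets, index 0 compressed
		 * ones; INFO_bm_xfer_stats() reports them as plain vs. RLE. */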
4856 		c.packets[pi->cmd == P_BITMAP]++;
4857 		c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size;
4858 
4859 		if (err <= 0) {
4860 			if (err < 0)
4861 				goto out;
4862 			break;
4863 		}
4864 		err = drbd_recv_header(peer_device->connection, pi);
4865 		if (err)
4866 			goto out;
4867 	}
4868 
4869 	INFO_bm_xfer_stats(device, "receive", &c);
4870 
4871 	if (device->state.conn == C_WF_BITMAP_T) {
4872 		enum drbd_state_rv rv;
4873 
4874 		err = drbd_send_bitmap(device);
4875 		if (err)
4876 			goto out;
4877 		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
4878 		rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4879 		D_ASSERT(device, rv == SS_SUCCESS);
4880 	} else if (device->state.conn != C_WF_BITMAP_S) {
4881 		/* admin may have requested C_DISCONNECTING,
4882 		 * other threads may have noticed network errors */
4883 		drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n",
4884 		    drbd_conn_str(device->state.conn));
4885 	}
4886 	err = 0;
4887 
4888  out:
4889 	drbd_bm_unlock(device);
4890 	if (!err && device->state.conn == C_WF_BITMAP_S)
4891 		drbd_start_resync(device, C_SYNC_SOURCE);
4892 	return err;
4893 }
4894 
4895 static int receive_skip(struct drbd_connection *connection, struct packet_info *pi)
4896 {
4897 	drbd_warn(connection, "skipping unknown optional packet type %d, l: %d!\n",
4898 		 pi->cmd, pi->size);
4899 
4900 	return ignore_remaining_packet(connection, pi);
4901 }
4902 
4903 static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi)
4904 {
4905 	/* Make sure we've acked all the TCP data associated
4906 	 * with the data requests being unplugged */
4907 	tcp_sock_set_quickack(connection->data.socket->sk, 2);
4908 	return 0;
4909 }
4910 
4911 static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi)
4912 {
4913 	struct drbd_peer_device *peer_device;
4914 	struct drbd_device *device;
4915 	struct p_block_desc *p = pi->data;
4916 
4917 	peer_device = conn_peer_device(connection, pi->vnr);
4918 	if (!peer_device)
4919 		return -EIO;
4920 	device = peer_device->device;
4921 
4922 	switch (device->state.conn) {
4923 	case C_WF_SYNC_UUID:
4924 	case C_WF_BITMAP_T:
4925 	case C_BEHIND:
4926 		break;
4927 	default:
4928 		drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4929 				drbd_conn_str(device->state.conn));
4930 	}
4931 
4932 	drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4933 
4934 	return 0;
4935 }
4936 
4937 static int receive_rs_deallocated(struct drbd_connection *connection, struct packet_info *pi)
4938 {
4939 	struct drbd_peer_device *peer_device;
4940 	struct p_block_desc *p = pi->data;
4941 	struct drbd_device *device;
4942 	sector_t sector;
4943 	int size, err = 0;
4944 
4945 	peer_device = conn_peer_device(connection, pi->vnr);
4946 	if (!peer_device)
4947 		return -EIO;
4948 	device = peer_device->device;
4949 
4950 	sector = be64_to_cpu(p->sector);
4951 	size = be32_to_cpu(p->blksize);
4952 
4953 	dec_rs_pending(device);
4954 
4955 	if (get_ldev(device)) {
4956 		struct drbd_peer_request *peer_req;
4957 		const int op = REQ_OP_WRITE_ZEROES;
4958 
4959 		peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
4960 					       size, 0, GFP_NOIO);
4961 		if (!peer_req) {
4962 			put_ldev(device);
4963 			return -ENOMEM;
4964 		}
4965 
4966 		peer_req->w.cb = e_end_resync_block;
4967 		peer_req->submit_jif = jiffies;
4968 		peer_req->flags |= EE_TRIM;
4969 
4970 		spin_lock_irq(&device->resource->req_lock);
4971 		list_add_tail(&peer_req->w.list, &device->sync_ee);
4972 		spin_unlock_irq(&device->resource->req_lock);
4973 
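		/* pi->size is 0 by now: the block_desc was consumed as the
		 * sub-header in drbdd(), and P_RS_DEALLOCATED carries no
		 * further payload */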
4974 		atomic_add(size >> 9, &device->rs_sect_ev);
4975 		err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR);
4976 
4977 		if (err) {
4978 			spin_lock_irq(&device->resource->req_lock);
4979 			list_del(&peer_req->w.list);
4980 			spin_unlock_irq(&device->resource->req_lock);
4981 
4982 			drbd_free_peer_req(device, peer_req);
4983 			put_ldev(device);
4984 			err = 0;
4985 			goto fail;
4986 		}
4987 
4988 		inc_unacked(device);
4989 
4990 		/* No put_ldev() here. Gets called in drbd_endio_write_sec_final(),
4991 		   as well as drbd_rs_complete_io() */
4992 	} else {
4993 	fail:
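		/* also reached via "goto fail" above when submitting the request failed */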
4994 		drbd_rs_complete_io(device, sector);
4995 		drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER);
4996 	}
4997 
4998 	atomic_add(size >> 9, &device->rs_sect_in);
4999 
5000 	return err;
5001 }
5002 
5003 struct data_cmd {
5004 	int expect_payload;
5005 	unsigned int pkt_size;
5006 	int (*fn)(struct drbd_connection *, struct packet_info *);
5007 };
5008 
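/* Dispatch table for the data socket, indexed by packet type.  Entries not
 * listed here are zero-initialized, i.e. fn == NULL, and drbdd() below
 * rejects those (as well as out-of-range packet types) as protocol errors. */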
5009 static struct data_cmd drbd_cmd_handler[] = {
5010 	[P_DATA]	    = { 1, sizeof(struct p_data), receive_Data },
5011 	[P_DATA_REPLY]	    = { 1, sizeof(struct p_data), receive_DataReply },
5012 	[P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply },
5013 	[P_BARRIER]	    = { 0, sizeof(struct p_barrier), receive_Barrier },
5014 	[P_BITMAP]	    = { 1, 0, receive_bitmap },
5015 	[P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap },
5016 	[P_UNPLUG_REMOTE]   = { 0, 0, receive_UnplugRemote },
5017 	[P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
5018 	[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
5019 	[P_SYNC_PARAM]	    = { 1, 0, receive_SyncParam },
5020 	[P_SYNC_PARAM89]    = { 1, 0, receive_SyncParam },
5021 	[P_PROTOCOL]        = { 1, sizeof(struct p_protocol), receive_protocol },
5022 	[P_UUIDS]	    = { 0, sizeof(struct p_uuids), receive_uuids },
5023 	[P_SIZES]	    = { 0, sizeof(struct p_sizes), receive_sizes },
5024 	[P_STATE]	    = { 0, sizeof(struct p_state), receive_state },
5025 	[P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
5026 	[P_SYNC_UUID]       = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
5027 	[P_OV_REQUEST]      = { 0, sizeof(struct p_block_req), receive_DataRequest },
5028 	[P_OV_REPLY]        = { 1, sizeof(struct p_block_req), receive_DataRequest },
5029 	[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
5030 	[P_RS_THIN_REQ]     = { 0, sizeof(struct p_block_req), receive_DataRequest },
5031 	[P_DELAY_PROBE]     = { 0, sizeof(struct p_delay_probe93), receive_skip },
5032 	[P_OUT_OF_SYNC]     = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
5033 	[P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
5034 	[P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
5035 	[P_TRIM]	    = { 0, sizeof(struct p_trim), receive_Data },
5036 	[P_ZEROES]	    = { 0, sizeof(struct p_trim), receive_Data },
5037 	[P_RS_DEALLOCATED]  = { 0, sizeof(struct p_block_desc), receive_rs_deallocated },
5038 };
5039 
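/* Receive loop of the data socket: read a packet header, validate the
 * announced size against the table above, collect the fixed-size sub-header,
 * then hand the remaining payload to the per-packet handler. */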
5040 static void drbdd(struct drbd_connection *connection)
5041 {
5042 	struct packet_info pi;
5043 	size_t shs; /* sub header size */
5044 	int err;
5045 
5046 	while (get_t_state(&connection->receiver) == RUNNING) {
5047 		struct data_cmd const *cmd;
5048 
5049 		drbd_thread_current_set_cpu(&connection->receiver);
5050 		update_receiver_timing_details(connection, drbd_recv_header_maybe_unplug);
5051 		if (drbd_recv_header_maybe_unplug(connection, &pi))
5052 			goto err_out;
5053 
5054 		cmd = &drbd_cmd_handler[pi.cmd];
5055 		if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
5056 			drbd_err(connection, "Unexpected data packet %s (0x%04x)\n",
5057 				 cmdname(pi.cmd), pi.cmd);
5058 			goto err_out;
5059 		}
5060 
5061 		shs = cmd->pkt_size;
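		/* P_SIZES grew a trailing struct o_qlim once WSAME was negotiated */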
5062 		if (pi.cmd == P_SIZES && connection->agreed_features & DRBD_FF_WSAME)
5063 			shs += sizeof(struct o_qlim);
5064 		if (pi.size > shs && !cmd->expect_payload) {
5065 			drbd_err(connection, "No payload expected %s l:%d\n",
5066 				 cmdname(pi.cmd), pi.size);
5067 			goto err_out;
5068 		}
5069 		if (pi.size < shs) {
5070 			drbd_err(connection, "%s: unexpected packet size, expected:%d received:%d\n",
5071 				 cmdname(pi.cmd), (int)shs, pi.size);
5072 			goto err_out;
5073 		}
5074 
5075 		if (shs) {
5076 			update_receiver_timing_details(connection, drbd_recv_all_warn);
5077 			err = drbd_recv_all_warn(connection, pi.data, shs);
5078 			if (err)
5079 				goto err_out;
5080 			pi.size -= shs;
5081 		}
5082 
5083 		update_receiver_timing_details(connection, cmd->fn);
5084 		err = cmd->fn(connection, &pi);
5085 		if (err) {
5086 			drbd_err(connection, "error receiving %s, e: %d l: %d!\n",
5087 				 cmdname(pi.cmd), err, pi.size);
5088 			goto err_out;
5089 		}
5090 	}
5091 	return;
5092 
5093     err_out:
5094 	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
5095 }
5096 
5097 static void conn_disconnect(struct drbd_connection *connection)
5098 {
5099 	struct drbd_peer_device *peer_device;
5100 	enum drbd_conns oc;
5101 	int vnr;
5102 
5103 	if (connection->cstate == C_STANDALONE)
5104 		return;
5105 
5106 	/* We are about to start the cleanup after connection loss.
5107 	 * Make sure drbd_make_request knows about that.
5108 	 * Usually we should be in some network failure state already,
5109 	 * but just in case we are not, we fix it up here.
5110 	 */
5111 	conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
5112 
5113 	/* ack_receiver does not clean up anything. It must not interfere, either. */
5114 	drbd_thread_stop(&connection->ack_receiver);
5115 	if (connection->ack_sender) {
5116 		destroy_workqueue(connection->ack_sender);
5117 		connection->ack_sender = NULL;
5118 	}
5119 	drbd_free_sock(connection);
5120 
5121 	rcu_read_lock();
5122 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
5123 		struct drbd_device *device = peer_device->device;
5124 		kref_get(&device->kref);
5125 		rcu_read_unlock();
5126 		drbd_disconnected(peer_device);
5127 		kref_put(&device->kref, drbd_destroy_device);
5128 		rcu_read_lock();
5129 	}
5130 	rcu_read_unlock();
5131 
5132 	if (!list_empty(&connection->current_epoch->list))
5133 		drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
5134 	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
5135 	atomic_set(&connection->current_epoch->epoch_size, 0);
5136 	connection->send.seen_any_write_yet = false;
5137 
5138 	drbd_info(connection, "Connection closed\n");
5139 
5140 	if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN)
5141 		conn_try_outdate_peer_async(connection);
5142 
5143 	spin_lock_irq(&connection->resource->req_lock);
5144 	oc = connection->cstate;
5145 	if (oc >= C_UNCONNECTED)
5146 		_conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
5147 
5148 	spin_unlock_irq(&connection->resource->req_lock);
5149 
5150 	if (oc == C_DISCONNECTING)
5151 		conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
5152 }
5153 
5154 static int drbd_disconnected(struct drbd_peer_device *peer_device)
5155 {
5156 	struct drbd_device *device = peer_device->device;
5157 	unsigned int i;
5158 
5159 	/* wait for current activity to cease. */
5160 	spin_lock_irq(&device->resource->req_lock);
5161 	_drbd_wait_ee_list_empty(device, &device->active_ee);
5162 	_drbd_wait_ee_list_empty(device, &device->sync_ee);
5163 	_drbd_wait_ee_list_empty(device, &device->read_ee);
5164 	spin_unlock_irq(&device->resource->req_lock);
5165 
5166 	/* We do not have data structures that would allow us to
5167 	 * get the rs_pending_cnt down to 0 again.
5168 	 *  * On C_SYNC_TARGET we do not have any data structures describing
5169 	 *    the pending RSDataRequests we have sent.
5170 	 *  * On C_SYNC_SOURCE there is no data structure that tracks
5171 	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
5172 	 *  And no, it is not the sum of the reference counts in the
5173 	 *  resync_LRU. The resync_LRU tracks the whole operation including
5174 	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
5175 	 *  on the fly. */
5176 	drbd_rs_cancel_all(device);
5177 	device->rs_total = 0;
5178 	device->rs_failed = 0;
5179 	atomic_set(&device->rs_pending_cnt, 0);
5180 	wake_up(&device->misc_wait);
5181 
5182 	del_timer_sync(&device->resync_timer);
5183 	resync_timer_fn(&device->resync_timer);
5184 
5185 	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
5186 	 * w_make_resync_request etc. which may still be on the worker queue
5187 	 * to be "canceled" */
5188 	drbd_flush_workqueue(&peer_device->connection->sender_work);
5189 
5190 	drbd_finish_peer_reqs(device);
5191 
5192 	/* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
5193 	   might have queued more work. The flush before drbd_finish_peer_reqs() is
5194 	   necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
5195 	drbd_flush_workqueue(&peer_device->connection->sender_work);
5196 
5197 	/* need to do it again, drbd_finish_peer_reqs() may have populated it
5198 	 * again via drbd_try_clear_on_disk_bm(). */
5199 	drbd_rs_cancel_all(device);
5200 
5201 	kfree(device->p_uuid);
5202 	device->p_uuid = NULL;
5203 
5204 	if (!drbd_suspended(device))
5205 		tl_clear(peer_device->connection);
5206 
5207 	drbd_md_sync(device);
5208 
5209 	if (get_ldev(device)) {
5210 		drbd_bitmap_io(device, &drbd_bm_write_copy_pages,
5211 				"write from disconnected", BM_LOCKED_CHANGE_ALLOWED);
5212 		put_ldev(device);
5213 	}
5214 
5215 	/* tcp_close and release of sendpage pages can be deferred.  I don't
5216 	 * want to use SO_LINGER, because apparently it can be deferred for
5217 	 * more than 20 seconds (longest time I checked).
5218 	 *
5219 	 * Actually we don't care for exactly when the network stack does its
5220 	 * put_page(), but release our reference on these pages right here.
5221 	 */
5222 	i = drbd_free_peer_reqs(device, &device->net_ee);
5223 	if (i)
5224 		drbd_info(device, "net_ee not empty, killed %u entries\n", i);
5225 	i = atomic_read(&device->pp_in_use_by_net);
5226 	if (i)
5227 		drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
5228 	i = atomic_read(&device->pp_in_use);
5229 	if (i)
5230 		drbd_info(device, "pp_in_use = %d, expected 0\n", i);
5231 
5232 	D_ASSERT(device, list_empty(&device->read_ee));
5233 	D_ASSERT(device, list_empty(&device->active_ee));
5234 	D_ASSERT(device, list_empty(&device->sync_ee));
5235 	D_ASSERT(device, list_empty(&device->done_ee));
5236 
5237 	return 0;
5238 }
5239 
5240 /*
5241  * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
5242  * we can agree on is stored in agreed_pro_version.
5243  *
5244  * Feature flags and the reserved array should be enough room for future
5245  * enhancements of the handshake protocol, and possible plugins...
5246  *
5247  * For now, they are expected to be zero, but ignored.
5248  */
5249 static int drbd_send_features(struct drbd_connection *connection)
5250 {
5251 	struct drbd_socket *sock;
5252 	struct p_connection_features *p;
5253 
5254 	sock = &connection->data;
5255 	p = conn_prepare_command(connection, sock);
5256 	if (!p)
5257 		return -EIO;
5258 	memset(p, 0, sizeof(*p));
5259 	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
5260 	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
5261 	p->feature_flags = cpu_to_be32(PRO_FEATURES);
5262 	return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
5263 }
5264 
5265 /*
5266  * return values:
5267  *   1 yes, we have a valid connection
5268  *   0 oops, did not work out, please try again
5269  *  -1 peer talks different language,
5270  *     no point in trying again, please go standalone.
5271  */
5272 static int drbd_do_features(struct drbd_connection *connection)
5273 {
5274 	/* ASSERT current == connection->receiver ... */
5275 	struct p_connection_features *p;
5276 	const int expect = sizeof(struct p_connection_features);
5277 	struct packet_info pi;
5278 	int err;
5279 
5280 	err = drbd_send_features(connection);
5281 	if (err)
5282 		return 0;
5283 
5284 	err = drbd_recv_header(connection, &pi);
5285 	if (err)
5286 		return 0;
5287 
5288 	if (pi.cmd != P_CONNECTION_FEATURES) {
5289 		drbd_err(connection, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
5290 			 cmdname(pi.cmd), pi.cmd);
5291 		return -1;
5292 	}
5293 
5294 	if (pi.size != expect) {
5295 		drbd_err(connection, "expected ConnectionFeatures length: %u, received: %u\n",
5296 		     expect, pi.size);
5297 		return -1;
5298 	}
5299 
5300 	p = pi.data;
5301 	err = drbd_recv_all_warn(connection, p, expect);
5302 	if (err)
5303 		return 0;
5304 
5305 	p->protocol_min = be32_to_cpu(p->protocol_min);
5306 	p->protocol_max = be32_to_cpu(p->protocol_max);
5307 	if (p->protocol_max == 0)
5308 		p->protocol_max = p->protocol_min;
5309 
5310 	if (PRO_VERSION_MAX < p->protocol_min ||
5311 	    PRO_VERSION_MIN > p->protocol_max)
5312 		goto incompat;
5313 
5314 	connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
5315 	connection->agreed_features = PRO_FEATURES & be32_to_cpu(p->feature_flags);
5316 
5317 	drbd_info(connection, "Handshake successful: "
5318 	     "Agreed network protocol version %d\n", connection->agreed_pro_version);
5319 
5320 	drbd_info(connection, "Feature flags enabled on protocol level: 0x%x%s%s%s%s.\n",
5321 		  connection->agreed_features,
5322 		  connection->agreed_features & DRBD_FF_TRIM ? " TRIM" : "",
5323 		  connection->agreed_features & DRBD_FF_THIN_RESYNC ? " THIN_RESYNC" : "",
5324 		  connection->agreed_features & DRBD_FF_WSAME ? " WRITE_SAME" : "",
5325 		  connection->agreed_features & DRBD_FF_WZEROES ? " WRITE_ZEROES" :
5326 		  connection->agreed_features ? "" : " none");
5327 
5328 	return 1;
5329 
5330  incompat:
5331 	drbd_err(connection, "incompatible DRBD dialects: "
5332 	    "I support %d-%d, peer supports %d-%d\n",
5333 	    PRO_VERSION_MIN, PRO_VERSION_MAX,
5334 	    p->protocol_min, p->protocol_max);
5335 	return -1;
5336 }
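/*
 * Example: built with, say, PRO_VERSION_MIN..PRO_VERSION_MAX = 86..101, a
 * peer announcing 86..96 yields agreed_pro_version = min(101, 96) = 96,
 * and agreed_features keeps only the feature bits both sides advertise.
 */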
5337 
5338 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
5339 static int drbd_do_auth(struct drbd_connection *connection)
5340 {
5341 	drbd_err(connection, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
5342 	drbd_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
5343 	return -1;
5344 }
5345 #else
5346 #define CHALLENGE_LEN 64
5347 
5348 /* Return value:
5349 	1 - auth succeeded,
5350 	0 - failed, try again (network error),
5351 	-1 - auth failed, don't try again.
5352 */
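/* A rough sketch of the (symmetric) exchange implemented below:
 *
 *   send P_AUTH_CHALLENGE  carrying CHALLENGE_LEN random bytes (my_challenge)
 *   recv P_AUTH_CHALLENGE  into peers_ch
 *   send P_AUTH_RESPONSE   carrying HMAC(shared_secret, peers_ch)
 *   recv P_AUTH_RESPONSE   and compare it against
 *                          HMAC(shared_secret, my_challenge)
 */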
5353 
5354 static int drbd_do_auth(struct drbd_connection *connection)
5355 {
5356 	struct drbd_socket *sock;
5357 	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
5358 	char *response = NULL;
5359 	char *right_response = NULL;
5360 	char *peers_ch = NULL;
5361 	unsigned int key_len;
5362 	char secret[SHARED_SECRET_MAX]; /* 64 byte */
5363 	unsigned int resp_size;
5364 	struct shash_desc *desc;
5365 	struct packet_info pi;
5366 	struct net_conf *nc;
5367 	int err, rv;
5368 
5369 	/* FIXME: Put the challenge/response into the preallocated socket buffer.  */
5370 
5371 	rcu_read_lock();
5372 	nc = rcu_dereference(connection->net_conf);
5373 	key_len = strlen(nc->shared_secret);
5374 	memcpy(secret, nc->shared_secret, key_len);
5375 	rcu_read_unlock();
5376 
5377 	desc = kmalloc(sizeof(struct shash_desc) +
5378 		       crypto_shash_descsize(connection->cram_hmac_tfm),
5379 		       GFP_KERNEL);
5380 	if (!desc) {
5381 		rv = -1;
5382 		goto fail;
5383 	}
5384 	desc->tfm = connection->cram_hmac_tfm;
5385 
5386 	rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
5387 	if (rv) {
5388 		drbd_err(connection, "crypto_shash_setkey() failed with %d\n", rv);
5389 		rv = -1;
5390 		goto fail;
5391 	}
5392 
5393 	get_random_bytes(my_challenge, CHALLENGE_LEN);
5394 
5395 	sock = &connection->data;
5396 	if (!conn_prepare_command(connection, sock)) {
5397 		rv = 0;
5398 		goto fail;
5399 	}
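	/* conn_send_command() returns 0 on success, so rv != 0 means "sent" here */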
5400 	rv = !conn_send_command(connection, sock, P_AUTH_CHALLENGE, 0,
5401 				my_challenge, CHALLENGE_LEN);
5402 	if (!rv)
5403 		goto fail;
5404 
5405 	err = drbd_recv_header(connection, &pi);
5406 	if (err) {
5407 		rv = 0;
5408 		goto fail;
5409 	}
5410 
5411 	if (pi.cmd != P_AUTH_CHALLENGE) {
5412 		drbd_err(connection, "expected AuthChallenge packet, received: %s (0x%04x)\n",
5413 			 cmdname(pi.cmd), pi.cmd);
5414 		rv = -1;
5415 		goto fail;
5416 	}
5417 
5418 	if (pi.size > CHALLENGE_LEN * 2) {
5419 		drbd_err(connection, "expected AuthChallenge payload too big.\n");
5420 		rv = -1;
5421 		goto fail;
5422 	}
5423 
5424 	if (pi.size < CHALLENGE_LEN) {
5425 		drbd_err(connection, "AuthChallenge payload too small.\n");
5426 		rv = -1;
5427 		goto fail;
5428 	}
5429 
5430 	peers_ch = kmalloc(pi.size, GFP_NOIO);
5431 	if (!peers_ch) {
5432 		rv = -1;
5433 		goto fail;
5434 	}
5435 
5436 	err = drbd_recv_all_warn(connection, peers_ch, pi.size);
5437 	if (err) {
5438 		rv = 0;
5439 		goto fail;
5440 	}
5441 
5442 	if (!memcmp(my_challenge, peers_ch, CHALLENGE_LEN)) {
5443 		drbd_err(connection, "Peer presented the same challenge!\n");
5444 		rv = -1;
5445 		goto fail;
5446 	}
5447 
5448 	resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm);
5449 	response = kmalloc(resp_size, GFP_NOIO);
5450 	if (!response) {
5451 		rv = -1;
5452 		goto fail;
5453 	}
5454 
5455 	rv = crypto_shash_digest(desc, peers_ch, pi.size, response);
5456 	if (rv) {
5457 		drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
5458 		rv = -1;
5459 		goto fail;
5460 	}
5461 
5462 	if (!conn_prepare_command(connection, sock)) {
5463 		rv = 0;
5464 		goto fail;
5465 	}
5466 	rv = !conn_send_command(connection, sock, P_AUTH_RESPONSE, 0,
5467 				response, resp_size);
5468 	if (!rv)
5469 		goto fail;
5470 
5471 	err = drbd_recv_header(connection, &pi);
5472 	if (err) {
5473 		rv = 0;
5474 		goto fail;
5475 	}
5476 
5477 	if (pi.cmd != P_AUTH_RESPONSE) {
5478 		drbd_err(connection, "expected AuthResponse packet, received: %s (0x%04x)\n",
5479 			 cmdname(pi.cmd), pi.cmd);
5480 		rv = 0;
5481 		goto fail;
5482 	}
5483 
5484 	if (pi.size != resp_size) {
5485 		drbd_err(connection, "expected AuthResponse payload of wrong size\n");
5486 		rv = 0;
5487 		goto fail;
5488 	}
5489 
5490 	err = drbd_recv_all_warn(connection, response, resp_size);
5491 	if (err) {
5492 		rv = 0;
5493 		goto fail;
5494 	}
5495 
5496 	right_response = kmalloc(resp_size, GFP_NOIO);
5497 	if (!right_response) {
5498 		rv = -1;
5499 		goto fail;
5500 	}
5501 
5502 	rv = crypto_shash_digest(desc, my_challenge, CHALLENGE_LEN,
5503 				 right_response);
5504 	if (rv) {
5505 		drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
5506 		rv = -1;
5507 		goto fail;
5508 	}
5509 
5510 	rv = !memcmp(response, right_response, resp_size);
5511 
5512 	if (rv)
5513 		drbd_info(connection, "Peer authenticated using %d bytes HMAC\n",
5514 		     resp_size);
5515 	else
5516 		rv = -1;
5517 
5518  fail:
5519 	kfree(peers_ch);
5520 	kfree(response);
5521 	kfree(right_response);
5522 	if (desc) {
5523 		shash_desc_zero(desc);
5524 		kfree(desc);
5525 	}
5526 
5527 	return rv;
5528 }
5529 #endif
5530 
5531 int drbd_receiver(struct drbd_thread *thi)
5532 {
5533 	struct drbd_connection *connection = thi->connection;
5534 	int h;
5535 
5536 	drbd_info(connection, "receiver (re)started\n");
5537 
5538 	do {
5539 		h = conn_connect(connection);
5540 		if (h == 0) {
5541 			conn_disconnect(connection);
5542 			schedule_timeout_interruptible(HZ);
5543 		}
5544 		if (h == -1) {
5545 			drbd_warn(connection, "Discarding network configuration.\n");
5546 			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
5547 		}
5548 	} while (h == 0);
5549 
5550 	if (h > 0) {
5551 		blk_start_plug(&connection->receiver_plug);
5552 		drbdd(connection);
5553 		blk_finish_plug(&connection->receiver_plug);
5554 	}
5555 
5556 	conn_disconnect(connection);
5557 
5558 	drbd_info(connection, "receiver terminated\n");
5559 	return 0;
5560 }
5561 
5562 /* ********* acknowledge sender ******** */
5563 
5564 static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
5565 {
5566 	struct p_req_state_reply *p = pi->data;
5567 	int retcode = be32_to_cpu(p->retcode);
5568 
5569 	if (retcode >= SS_SUCCESS) {
5570 		set_bit(CONN_WD_ST_CHG_OKAY, &connection->flags);
5571 	} else {
5572 		set_bit(CONN_WD_ST_CHG_FAIL, &connection->flags);
5573 		drbd_err(connection, "Requested state change failed by peer: %s (%d)\n",
5574 			 drbd_set_st_err_str(retcode), retcode);
5575 	}
5576 	wake_up(&connection->ping_wait);
5577 
5578 	return 0;
5579 }
5580 
5581 static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
5582 {
5583 	struct drbd_peer_device *peer_device;
5584 	struct drbd_device *device;
5585 	struct p_req_state_reply *p = pi->data;
5586 	int retcode = be32_to_cpu(p->retcode);
5587 
5588 	peer_device = conn_peer_device(connection, pi->vnr);
5589 	if (!peer_device)
5590 		return -EIO;
5591 	device = peer_device->device;
5592 
5593 	if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) {
5594 		D_ASSERT(device, connection->agreed_pro_version < 100);
5595 		return got_conn_RqSReply(connection, pi);
5596 	}
5597 
5598 	if (retcode >= SS_SUCCESS) {
5599 		set_bit(CL_ST_CHG_SUCCESS, &device->flags);
5600 	} else {
5601 		set_bit(CL_ST_CHG_FAIL, &device->flags);
5602 		drbd_err(device, "Requested state change failed by peer: %s (%d)\n",
5603 			drbd_set_st_err_str(retcode), retcode);
5604 	}
5605 	wake_up(&device->state_wait);
5606 
5607 	return 0;
5608 }
5609 
5610 static int got_Ping(struct drbd_connection *connection, struct packet_info *pi)
5611 {
5612 	return drbd_send_ping_ack(connection);
5614 }
5615 
5616 static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi)
5617 {
5618 	/* restore idle timeout */
5619 	connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int * HZ;
5620 	if (!test_and_set_bit(GOT_PING_ACK, &connection->flags))
5621 		wake_up(&connection->ping_wait);
5622 
5623 	return 0;
5624 }
5625 
5626 static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi)
5627 {
5628 	struct drbd_peer_device *peer_device;
5629 	struct drbd_device *device;
5630 	struct p_block_ack *p = pi->data;
5631 	sector_t sector = be64_to_cpu(p->sector);
5632 	int blksize = be32_to_cpu(p->blksize);
5633 
5634 	peer_device = conn_peer_device(connection, pi->vnr);
5635 	if (!peer_device)
5636 		return -EIO;
5637 	device = peer_device->device;
5638 
5639 	D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89);
5640 
5641 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5642 
5643 	if (get_ldev(device)) {
5644 		drbd_rs_complete_io(device, sector);
5645 		drbd_set_in_sync(device, sector, blksize);
5646 		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
5647 		device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
5648 		put_ldev(device);
5649 	}
5650 	dec_rs_pending(device);
5651 	atomic_add(blksize >> 9, &device->rs_sect_in);
5652 
5653 	return 0;
5654 }
5655 
5656 static int
5657 validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector,
5658 			      struct rb_root *root, const char *func,
5659 			      enum drbd_req_event what, bool missing_ok)
5660 {
5661 	struct drbd_request *req;
5662 	struct bio_and_error m;
5663 
5664 	spin_lock_irq(&device->resource->req_lock);
5665 	req = find_request(device, root, id, sector, missing_ok, func);
5666 	if (unlikely(!req)) {
5667 		spin_unlock_irq(&device->resource->req_lock);
5668 		return -EIO;
5669 	}
5670 	__req_mod(req, what, &m);
5671 	spin_unlock_irq(&device->resource->req_lock);
5672 
5673 	if (m.bio)
5674 		complete_master_bio(device, &m);
5675 	return 0;
5676 }
5677 
5678 static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi)
5679 {
5680 	struct drbd_peer_device *peer_device;
5681 	struct drbd_device *device;
5682 	struct p_block_ack *p = pi->data;
5683 	sector_t sector = be64_to_cpu(p->sector);
5684 	int blksize = be32_to_cpu(p->blksize);
5685 	enum drbd_req_event what;
5686 
5687 	peer_device = conn_peer_device(connection, pi->vnr);
5688 	if (!peer_device)
5689 		return -EIO;
5690 	device = peer_device->device;
5691 
5692 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5693 
5694 	if (p->block_id == ID_SYNCER) {
5695 		drbd_set_in_sync(device, sector, blksize);
5696 		dec_rs_pending(device);
5697 		return 0;
5698 	}
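	/* P_RECV_ACK is the protocol B ack, P_WRITE_ACK the protocol C ack;
	 * P_RS_WRITE_ACK additionally implies "set in sync" (SIS) */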
5699 	switch (pi->cmd) {
5700 	case P_RS_WRITE_ACK:
5701 		what = WRITE_ACKED_BY_PEER_AND_SIS;
5702 		break;
5703 	case P_WRITE_ACK:
5704 		what = WRITE_ACKED_BY_PEER;
5705 		break;
5706 	case P_RECV_ACK:
5707 		what = RECV_ACKED_BY_PEER;
5708 		break;
5709 	case P_SUPERSEDED:
5710 		what = CONFLICT_RESOLVED;
5711 		break;
5712 	case P_RETRY_WRITE:
5713 		what = POSTPONE_WRITE;
5714 		break;
5715 	default:
5716 		BUG();
5717 	}
5718 
5719 	return validate_req_change_req_state(device, p->block_id, sector,
5720 					     &device->write_requests, __func__,
5721 					     what, false);
5722 }
5723 
5724 static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi)
5725 {
5726 	struct drbd_peer_device *peer_device;
5727 	struct drbd_device *device;
5728 	struct p_block_ack *p = pi->data;
5729 	sector_t sector = be64_to_cpu(p->sector);
5730 	int size = be32_to_cpu(p->blksize);
5731 	int err;
5732 
5733 	peer_device = conn_peer_device(connection, pi->vnr);
5734 	if (!peer_device)
5735 		return -EIO;
5736 	device = peer_device->device;
5737 
5738 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5739 
5740 	if (p->block_id == ID_SYNCER) {
5741 		dec_rs_pending(device);
5742 		drbd_rs_failed_io(device, sector, size);
5743 		return 0;
5744 	}
5745 
5746 	err = validate_req_change_req_state(device, p->block_id, sector,
5747 					    &device->write_requests, __func__,
5748 					    NEG_ACKED, true);
5749 	if (err) {
5750 		/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5751 		   The master bio might already be completed, therefore the
5752 		   request is no longer in the collision hash. */
5753 		/* In Protocol B we might already have got a P_RECV_ACK
5754 		   but then get a P_NEG_ACK afterwards. */
5755 		drbd_set_out_of_sync(device, sector, size);
5756 	}
5757 	return 0;
5758 }
5759 
5760 static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi)
5761 {
5762 	struct drbd_peer_device *peer_device;
5763 	struct drbd_device *device;
5764 	struct p_block_ack *p = pi->data;
5765 	sector_t sector = be64_to_cpu(p->sector);
5766 
5767 	peer_device = conn_peer_device(connection, pi->vnr);
5768 	if (!peer_device)
5769 		return -EIO;
5770 	device = peer_device->device;
5771 
5772 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5773 
5774 	drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n",
5775 	    (unsigned long long)sector, be32_to_cpu(p->blksize));
5776 
5777 	return validate_req_change_req_state(device, p->block_id, sector,
5778 					     &device->read_requests, __func__,
5779 					     NEG_ACKED, false);
5780 }
5781 
5782 static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi)
5783 {
5784 	struct drbd_peer_device *peer_device;
5785 	struct drbd_device *device;
5786 	sector_t sector;
5787 	int size;
5788 	struct p_block_ack *p = pi->data;
5789 
5790 	peer_device = conn_peer_device(connection, pi->vnr);
5791 	if (!peer_device)
5792 		return -EIO;
5793 	device = peer_device->device;
5794 
5795 	sector = be64_to_cpu(p->sector);
5796 	size = be32_to_cpu(p->blksize);
5797 
5798 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5799 
5800 	dec_rs_pending(device);
5801 
5802 	if (get_ldev_if_state(device, D_FAILED)) {
5803 		drbd_rs_complete_io(device, sector);
5804 		switch (pi->cmd) {
5805 		case P_NEG_RS_DREPLY:
5806 			drbd_rs_failed_io(device, sector, size);
5807 			break;
5808 		case P_RS_CANCEL:
5809 			break;
5810 		default:
5811 			BUG();
5812 		}
5813 		put_ldev(device);
5814 	}
5815 
5816 	return 0;
5817 }
5818 
5819 static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi)
5820 {
5821 	struct p_barrier_ack *p = pi->data;
5822 	struct drbd_peer_device *peer_device;
5823 	int vnr;
5824 
5825 	tl_release(connection, p->barrier, be32_to_cpu(p->set_size));
5826 
5827 	rcu_read_lock();
5828 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
5829 		struct drbd_device *device = peer_device->device;
5830 
5831 		if (device->state.conn == C_AHEAD &&
5832 		    atomic_read(&device->ap_in_flight) == 0 &&
5833 		    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) {
5834 			device->start_resync_timer.expires = jiffies + HZ;
5835 			add_timer(&device->start_resync_timer);
5836 		}
5837 	}
5838 	rcu_read_unlock();
5839 
5840 	return 0;
5841 }
5842 
5843 static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi)
5844 {
5845 	struct drbd_peer_device *peer_device;
5846 	struct drbd_device *device;
5847 	struct p_block_ack *p = pi->data;
5848 	struct drbd_device_work *dw;
5849 	sector_t sector;
5850 	int size;
5851 
5852 	peer_device = conn_peer_device(connection, pi->vnr);
5853 	if (!peer_device)
5854 		return -EIO;
5855 	device = peer_device->device;
5856 
5857 	sector = be64_to_cpu(p->sector);
5858 	size = be32_to_cpu(p->blksize);
5859 
5860 	update_peer_seq(peer_device, be32_to_cpu(p->seq_num));
5861 
5862 	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
5863 		drbd_ov_out_of_sync_found(device, sector, size);
5864 	else
5865 		ov_out_of_sync_print(device);
5866 
5867 	if (!get_ldev(device))
5868 		return 0;
5869 
5870 	drbd_rs_complete_io(device, sector);
5871 	dec_rs_pending(device);
5872 
5873 	--device->ov_left;
5874 
5875 	/* let's advance progress step marks only for every other megabyte */
5876 	if ((device->ov_left & 0x200) == 0x200)
5877 		drbd_advance_rs_marks(device, device->ov_left);
5878 
5879 	if (device->ov_left == 0) {
5880 		dw = kmalloc(sizeof(*dw), GFP_NOIO);
5881 		if (dw) {
5882 			dw->w.cb = w_ov_finished;
5883 			dw->device = device;
5884 			drbd_queue_work(&peer_device->connection->sender_work, &dw->w);
5885 		} else {
5886 			drbd_err(device, "kmalloc(dw) failed.");
5887 			ov_out_of_sync_print(device);
5888 			drbd_resync_finished(device);
5889 		}
5890 	}
5891 	put_ldev(device);
5892 	return 0;
5893 }
5894 
5895 static int got_skip(struct drbd_connection *connection, struct packet_info *pi)
5896 {
5897 	return 0;
5898 }
5899 
5900 struct meta_sock_cmd {
5901 	size_t pkt_size;
5902 	int (*fn)(struct drbd_connection *connection, struct packet_info *);
5903 };
5904 
5905 static void set_rcvtimeo(struct drbd_connection *connection, bool ping_timeout)
5906 {
5907 	long t;
5908 	struct net_conf *nc;
5909 
5910 	rcu_read_lock();
5911 	nc = rcu_dereference(connection->net_conf);
5912 	t = ping_timeout ? nc->ping_timeo : nc->ping_int;
5913 	rcu_read_unlock();
5914 
5915 	t *= HZ;
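	/* ping_timeo is configured in tenths of a second, ping_int in seconds */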
5916 	if (ping_timeout)
5917 		t /= 10;
5918 
5919 	connection->meta.socket->sk->sk_rcvtimeo = t;
5920 }
5921 
5922 static void set_ping_timeout(struct drbd_connection *connection)
5923 {
5924 	set_rcvtimeo(connection, 1);
5925 }
5926 
5927 static void set_idle_timeout(struct drbd_connection *connection)
5928 {
5929 	set_rcvtimeo(connection, 0);
5930 }
5931 
5932 static struct meta_sock_cmd ack_receiver_tbl[] = {
5933 	[P_PING]	    = { 0, got_Ping },
5934 	[P_PING_ACK]	    = { 0, got_PingAck },
5935 	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
5936 	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
5937 	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
5938 	[P_SUPERSEDED]	    = { sizeof(struct p_block_ack), got_BlockAck },
5939 	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
5940 	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
5941 	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
5942 	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
5943 	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
5944 	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5945 	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
5946 	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
5947 	[P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply },
5948 	[P_CONN_ST_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_conn_RqSReply },
5949 	[P_RETRY_WRITE]	    = { sizeof(struct p_block_ack), got_BlockAck },
5950 };
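/* The meta socket is reassembled in two steps below: first header_size bytes
 * to decode the command, then exactly cmd->pkt_size payload bytes, before the
 * packet is dispatched through the table above. */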
5951 
5952 int drbd_ack_receiver(struct drbd_thread *thi)
5953 {
5954 	struct drbd_connection *connection = thi->connection;
5955 	struct meta_sock_cmd *cmd = NULL;
5956 	struct packet_info pi;
5957 	unsigned long pre_recv_jif;
5958 	int rv;
5959 	void *buf    = connection->meta.rbuf;
5960 	int received = 0;
5961 	unsigned int header_size = drbd_header_size(connection);
5962 	int expect   = header_size;
5963 	bool ping_timeout_active = false;
5964 
5965 	sched_set_fifo_low(current);
5966 
5967 	while (get_t_state(thi) == RUNNING) {
5968 		drbd_thread_current_set_cpu(thi);
5969 
5970 		conn_reclaim_net_peer_reqs(connection);
5971 
5972 		if (test_and_clear_bit(SEND_PING, &connection->flags)) {
5973 			if (drbd_send_ping(connection)) {
5974 				drbd_err(connection, "drbd_send_ping has failed\n");
5975 				goto reconnect;
5976 			}
5977 			set_ping_timeout(connection);
5978 			ping_timeout_active = true;
5979 		}
5980 
5981 		pre_recv_jif = jiffies;
5982 		rv = drbd_recv_short(connection->meta.socket, buf, expect - received, 0);
5983 
5984 		/* Note:
5985 		 * -EINTR	 (on meta) we got a signal
5986 		 * -EAGAIN	 (on meta) rcvtimeo expired
5987 		 * -ECONNRESET	 other side closed the connection
5988 		 * -ERESTARTSYS  (on data) we got a signal
5989 		 * rv <  0	 other than above: unexpected error!
5990 		 * rv == expected: full header or command
5991 		 * rv <  expected: "woken" by signal during receive
5992 		 * rv == 0	 : "connection shut down by peer"
5993 		 */
5994 		if (likely(rv > 0)) {
5995 			received += rv;
5996 			buf	 += rv;
5997 		} else if (rv == 0) {
5998 			if (test_bit(DISCONNECT_SENT, &connection->flags)) {
5999 				long t;
6000 				rcu_read_lock();
6001 				t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
6002 				rcu_read_unlock();
6003 
6004 				t = wait_event_timeout(connection->ping_wait,
6005 						       connection->cstate < C_WF_REPORT_PARAMS,
6006 						       t);
6007 				if (t)
6008 					break;
6009 			}
6010 			drbd_err(connection, "meta connection shut down by peer.\n");
6011 			goto reconnect;
6012 		} else if (rv == -EAGAIN) {
6013 			/* If the data socket received something meanwhile,
6014 			 * that is good enough: peer is still alive. */
6015 			if (time_after(connection->last_received, pre_recv_jif))
6016 				continue;
6017 			if (ping_timeout_active) {
6018 				drbd_err(connection, "PingAck did not arrive in time.\n");
6019 				goto reconnect;
6020 			}
6021 			set_bit(SEND_PING, &connection->flags);
6022 			continue;
6023 		} else if (rv == -EINTR) {
6024 			/* maybe drbd_thread_stop(): the while condition will notice.
6025 			 * maybe woken for send_ping: we'll send a ping above,
6026 			 * and change the rcvtimeo */
6027 			flush_signals(current);
6028 			continue;
6029 		} else {
6030 			drbd_err(connection, "sock_recvmsg returned %d\n", rv);
6031 			goto reconnect;
6032 		}
6033 
6034 		if (received == expect && cmd == NULL) {
6035 			if (decode_header(connection, connection->meta.rbuf, &pi))
6036 				goto reconnect;
6037 			cmd = &ack_receiver_tbl[pi.cmd];
6038 			if (pi.cmd >= ARRAY_SIZE(ack_receiver_tbl) || !cmd->fn) {
6039 				drbd_err(connection, "Unexpected meta packet %s (0x%04x)\n",
6040 					 cmdname(pi.cmd), pi.cmd);
6041 				goto disconnect;
6042 			}
6043 			expect = header_size + cmd->pkt_size;
6044 			if (pi.size != expect - header_size) {
6045 				drbd_err(connection, "Wrong packet size on meta (c: %d, l: %d)\n",
6046 					pi.cmd, pi.size);
6047 				goto reconnect;
6048 			}
6049 		}
6050 		if (received == expect) {
6051 			bool err;
6052 
6053 			err = cmd->fn(connection, &pi);
6054 			if (err) {
6055 				drbd_err(connection, "%ps failed\n", cmd->fn);
6056 				goto reconnect;
6057 			}
6058 
6059 			connection->last_received = jiffies;
6060 
6061 			if (cmd == &ack_receiver_tbl[P_PING_ACK]) {
6062 				set_idle_timeout(connection);
6063 				ping_timeout_active = false;
6064 			}
6065 
6066 			buf	 = connection->meta.rbuf;
6067 			received = 0;
6068 			expect	 = header_size;
6069 			cmd	 = NULL;
6070 		}
6071 	}
6072 
6073 	if (0) {
6074 reconnect:
6075 		conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
6076 		conn_md_sync(connection);
6077 	}
6078 	if (0) {
6079 disconnect:
6080 		conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
6081 	}
6082 
6083 	drbd_info(connection, "ack_receiver terminated\n");
6084 
6085 	return 0;
6086 }
6087 
6088 void drbd_send_acks_wf(struct work_struct *ws)
6089 {
6090 	struct drbd_peer_device *peer_device =
6091 		container_of(ws, struct drbd_peer_device, send_acks_work);
6092 	struct drbd_connection *connection = peer_device->connection;
6093 	struct drbd_device *device = peer_device->device;
6094 	struct net_conf *nc;
6095 	int tcp_cork, err;
6096 
6097 	rcu_read_lock();
6098 	nc = rcu_dereference(connection->net_conf);
6099 	tcp_cork = nc->tcp_cork;
6100 	rcu_read_unlock();
6101 
6102 	if (tcp_cork)
6103 		tcp_sock_set_cork(connection->meta.socket->sk, true);
6104 
6105 	err = drbd_finish_peer_reqs(device);
6106 	kref_put(&device->kref, drbd_destroy_device);
6107 	/* The matching kref_get() is in drbd_endio_write_sec_final(); it keeps the
6108 	   struct work_struct send_acks_work in the peer_device object alive. */
6109 
6110 	if (err) {
6111 		conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
6112 		return;
6113 	}
6114 
6115 	if (tcp_cork)
6116 		tcp_sock_set_cork(connection->meta.socket->sk, false);
6119 }
6120