xref: /linux/drivers/block/drbd/drbd_receiver.c (revision 580b9767dbdf2c049c4d05330c70ea786ef01016)
1 /*
2    drbd_receiver.c
3 
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5 
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9 
10    drbd is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2, or (at your option)
13    any later version.
14 
15    drbd is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19 
20    You should have received a copy of the GNU General Public License
21    along with drbd; see the file COPYING.  If not, write to
22    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23  */
24 
25 
26 #include <linux/module.h>
27 
28 #include <asm/uaccess.h>
29 #include <net/sock.h>
30 
31 #include <linux/drbd.h>
32 #include <linux/fs.h>
33 #include <linux/file.h>
34 #include <linux/in.h>
35 #include <linux/mm.h>
36 #include <linux/memcontrol.h>
37 #include <linux/mm_inline.h>
38 #include <linux/slab.h>
39 #include <linux/smp_lock.h>
40 #include <linux/pkt_sched.h>
41 #define __KERNEL_SYSCALLS__
42 #include <linux/unistd.h>
43 #include <linux/vmalloc.h>
44 #include <linux/random.h>
45 #include <linux/mm.h>
46 #include <linux/string.h>
47 #include <linux/scatterlist.h>
48 #include "drbd_int.h"
49 #include "drbd_req.h"
50 
51 #include "drbd_vli.h"
52 
53 struct flush_work {
54 	struct drbd_work w;
55 	struct drbd_epoch *epoch;
56 };
57 
58 enum finish_epoch {
59 	FE_STILL_LIVE,
60 	FE_DESTROYED,
61 	FE_RECYCLED,
62 };
63 
64 static int drbd_do_handshake(struct drbd_conf *mdev);
65 static int drbd_do_auth(struct drbd_conf *mdev);
66 
67 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
68 static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
69 
70 static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
71 {
72 	struct drbd_epoch *prev;
73 	spin_lock(&mdev->epoch_lock);
74 	prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
75 	if (prev == epoch || prev == mdev->current_epoch)
76 		prev = NULL;
77 	spin_unlock(&mdev->epoch_lock);
78 	return prev;
79 }
80 
81 #define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
82 
83 static struct page *drbd_pp_first_page_or_try_alloc(struct drbd_conf *mdev)
84 {
85 	struct page *page = NULL;
86 
87 	/* Yes, testing drbd_pp_vacant outside the lock is racy.
88 	 * So what. It saves a spin_lock. */
89 	if (drbd_pp_vacant > 0) {
90 		spin_lock(&drbd_pp_lock);
91 		page = drbd_pp_pool;
92 		if (page) {
93 			drbd_pp_pool = (struct page *)page_private(page);
94 			set_page_private(page, 0); /* just to be polite */
95 			drbd_pp_vacant--;
96 		}
97 		spin_unlock(&drbd_pp_lock);
98 	}
99 	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
100 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
101 	 * which in turn might block on the other node at this very place.  */
102 	if (!page)
103 		page = alloc_page(GFP_TRY);
104 	if (page)
105 		atomic_inc(&mdev->pp_in_use);
106 	return page;
107 }
108 
109 /* kick lower level device, if we have more than (arbitrary number)
110  * reference counts on it, which typically are locally submitted io
111  * requests.  don't use unacked_cnt, so we speed up proto A and B, too. */
112 static void maybe_kick_lo(struct drbd_conf *mdev)
113 {
114 	if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
115 		drbd_kick_lo(mdev);
116 }
117 
118 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
119 {
120 	struct drbd_epoch_entry *e;
121 	struct list_head *le, *tle;
122 
123 	/* The EEs are always appended to the end of the list. Since
124 	   they are sent in order over the wire, they have to finish
125 	   in order. As soon as we see the first one that has not finished,
126 	   we can stop examining the list... */
127 
128 	list_for_each_safe(le, tle, &mdev->net_ee) {
129 		e = list_entry(le, struct drbd_epoch_entry, w.list);
130 		if (drbd_bio_has_active_page(e->private_bio))
131 			break;
132 		list_move(le, to_be_freed);
133 	}
134 }
135 
136 static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
137 {
138 	LIST_HEAD(reclaimed);
139 	struct drbd_epoch_entry *e, *t;
140 
141 	maybe_kick_lo(mdev);
142 	spin_lock_irq(&mdev->req_lock);
143 	reclaim_net_ee(mdev, &reclaimed);
144 	spin_unlock_irq(&mdev->req_lock);
145 
146 	list_for_each_entry_safe(e, t, &reclaimed, w.list)
147 		drbd_free_ee(mdev, e);
148 }
149 
150 /**
151  * drbd_pp_alloc() - Returns a page; if @retry is set, fails only if a signal comes in
152  * @mdev:	DRBD device.
153  * @retry:	whether or not to retry allocation forever (or until signalled)
154  *
155  * Tries to allocate a page, first from our own page pool, then from the
156  * kernel, unless this allocation would exceed the max_buffers setting.
157  * If @retry is non-zero, retry until DRBD frees a page somewhere else.
158  */
159 static struct page *drbd_pp_alloc(struct drbd_conf *mdev, int retry)
160 {
161 	struct page *page = NULL;
162 	DEFINE_WAIT(wait);
163 
164 	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
165 		page = drbd_pp_first_page_or_try_alloc(mdev);
166 		if (page)
167 			return page;
168 	}
169 
170 	for (;;) {
171 		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
172 
173 		drbd_kick_lo_and_reclaim_net(mdev);
174 
175 		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
176 			page = drbd_pp_first_page_or_try_alloc(mdev);
177 			if (page)
178 				break;
179 		}
180 
181 		if (!retry)
182 			break;
183 
184 		if (signal_pending(current)) {
185 			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
186 			break;
187 		}
188 
189 		schedule();
190 	}
191 	finish_wait(&drbd_pp_wait, &wait);
192 
193 	return page;
194 }
195 
196 /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
197  * Is also used from inside another spin_lock_irq(&mdev->req_lock) */
198 static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
199 {
200 	int free_it;
201 
202 	spin_lock(&drbd_pp_lock);
203 	if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
204 		free_it = 1;
205 	} else {
206 		set_page_private(page, (unsigned long)drbd_pp_pool);
207 		drbd_pp_pool = page;
208 		drbd_pp_vacant++;
209 		free_it = 0;
210 	}
211 	spin_unlock(&drbd_pp_lock);
212 
213 	atomic_dec(&mdev->pp_in_use);
214 
215 	if (free_it)
216 		__free_page(page);
217 
218 	wake_up(&drbd_pp_wait);
219 }
220 
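/*
 * Illustrative sketch only, not part of the original file: how a caller is
 * expected to pair drbd_pp_alloc() with drbd_pp_free().  With retry == 0 the
 * allocation may return NULL once the pool and net_conf->max_buffers are
 * exhausted; with retry != 0 it blocks and returns NULL only if the caller
 * gets signalled.
 */
static int example_pp_roundtrip(struct drbd_conf *mdev, int may_block)
{
	struct page *page = drbd_pp_alloc(mdev, may_block);

	if (!page)
		return 0;	/* shortage (retry == 0) or signal (retry != 0) */

	/* ... use the page, e.g. as a receive buffer ... */

	drbd_pp_free(mdev, page);
	return 1;
}
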
221 static void drbd_pp_free_bio_pages(struct drbd_conf *mdev, struct bio *bio)
222 {
223 	struct page *p_to_be_freed = NULL;
224 	struct page *page;
225 	struct bio_vec *bvec;
226 	int i;
227 
228 	spin_lock(&drbd_pp_lock);
229 	__bio_for_each_segment(bvec, bio, i, 0) {
230 		if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
231 			set_page_private(bvec->bv_page, (unsigned long)p_to_be_freed);
232 			p_to_be_freed = bvec->bv_page;
233 		} else {
234 			set_page_private(bvec->bv_page, (unsigned long)drbd_pp_pool);
235 			drbd_pp_pool = bvec->bv_page;
236 			drbd_pp_vacant++;
237 		}
238 	}
239 	spin_unlock(&drbd_pp_lock);
240 	atomic_sub(bio->bi_vcnt, &mdev->pp_in_use);
241 
242 	while (p_to_be_freed) {
243 		page = p_to_be_freed;
244 		p_to_be_freed = (struct page *)page_private(page);
245 		set_page_private(page, 0); /* just to be polite */
246 		put_page(page);
247 	}
248 
249 	wake_up(&drbd_pp_wait);
250 }
251 
252 /*
253 You need to hold the req_lock:
254  _drbd_wait_ee_list_empty()
255 
256 You must not have the req_lock:
257  drbd_free_ee()
258  drbd_alloc_ee()
259  drbd_init_ee()
260  drbd_release_ee()
261  drbd_ee_fix_bhs()
262  drbd_process_done_ee()
263  drbd_clear_done_ee()
264  drbd_wait_ee_list_empty()
265 */
266 
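/*
 * Illustrative sketch, not part of the original source: the locking contract
 * listed above, in code.  drbd_wait_ee_list_empty() further down is the real
 * wrapper; this assumes both helpers are declared in drbd_int.h.
 */
static void example_wait_active_ee(struct drbd_conf *mdev)
{
	/* callers without the lock use the wrapper, which takes req_lock itself */
	drbd_wait_ee_list_empty(mdev, &mdev->active_ee);

	/* callers that already hold req_lock use the _-prefixed variant */
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);
}
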
267 struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
268 				     u64 id,
269 				     sector_t sector,
270 				     unsigned int data_size,
271 				     gfp_t gfp_mask) __must_hold(local)
272 {
273 	struct request_queue *q;
274 	struct drbd_epoch_entry *e;
275 	struct page *page;
276 	struct bio *bio;
277 	unsigned int ds;
278 
279 	if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
280 		return NULL;
281 
282 	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
283 	if (!e) {
284 		if (!(gfp_mask & __GFP_NOWARN))
285 			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
286 		return NULL;
287 	}
288 
289 	bio = bio_alloc(gfp_mask & ~__GFP_HIGHMEM, div_ceil(data_size, PAGE_SIZE));
290 	if (!bio) {
291 		if (!(gfp_mask & __GFP_NOWARN))
292 			dev_err(DEV, "alloc_ee: Allocation of a bio failed\n");
293 		goto fail1;
294 	}
295 
296 	bio->bi_bdev = mdev->ldev->backing_bdev;
297 	bio->bi_sector = sector;
298 
299 	ds = data_size;
300 	while (ds) {
301 		page = drbd_pp_alloc(mdev, (gfp_mask & __GFP_WAIT));
302 		if (!page) {
303 			if (!(gfp_mask & __GFP_NOWARN))
304 				dev_err(DEV, "alloc_ee: Allocation of a page failed\n");
305 			goto fail2;
306 		}
307 		if (!bio_add_page(bio, page, min_t(int, ds, PAGE_SIZE), 0)) {
308 			drbd_pp_free(mdev, page);
309 			dev_err(DEV, "alloc_ee: bio_add_page(s=%llu,"
310 			    "data_size=%u,ds=%u) failed\n",
311 			    (unsigned long long)sector, data_size, ds);
312 
313 			q = bdev_get_queue(bio->bi_bdev);
314 			if (q->merge_bvec_fn) {
315 				struct bvec_merge_data bvm = {
316 					.bi_bdev = bio->bi_bdev,
317 					.bi_sector = bio->bi_sector,
318 					.bi_size = bio->bi_size,
319 					.bi_rw = bio->bi_rw,
320 				};
321 				int l = q->merge_bvec_fn(q, &bvm,
322 						&bio->bi_io_vec[bio->bi_vcnt]);
323 				dev_err(DEV, "merge_bvec_fn() = %d\n", l);
324 			}
325 
326 			/* dump more of the bio. */
327 			dev_err(DEV, "bio->bi_max_vecs = %d\n", bio->bi_max_vecs);
328 			dev_err(DEV, "bio->bi_vcnt = %d\n", bio->bi_vcnt);
329 			dev_err(DEV, "bio->bi_size = %d\n", bio->bi_size);
330 			dev_err(DEV, "bio->bi_phys_segments = %d\n", bio->bi_phys_segments);
331 
332 			goto fail2;
333 			break;
334 		}
335 		ds -= min_t(int, ds, PAGE_SIZE);
336 	}
337 
338 	D_ASSERT(data_size == bio->bi_size);
339 
340 	bio->bi_private = e;
341 	e->mdev = mdev;
342 	e->sector = sector;
343 	e->size = bio->bi_size;
344 
345 	e->private_bio = bio;
346 	e->block_id = id;
347 	INIT_HLIST_NODE(&e->colision);
348 	e->epoch = NULL;
349 	e->flags = 0;
350 
351 	return e;
352 
353  fail2:
354 	drbd_pp_free_bio_pages(mdev, bio);
355 	bio_put(bio);
356  fail1:
357 	mempool_free(e, drbd_ee_mempool);
358 
359 	return NULL;
360 }
361 
362 void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
363 {
364 	struct bio *bio = e->private_bio;
365 	drbd_pp_free_bio_pages(mdev, bio);
366 	bio_put(bio);
367 	D_ASSERT(hlist_unhashed(&e->colision));
368 	mempool_free(e, drbd_ee_mempool);
369 }
370 
371 int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
372 {
373 	LIST_HEAD(work_list);
374 	struct drbd_epoch_entry *e, *t;
375 	int count = 0;
376 
377 	spin_lock_irq(&mdev->req_lock);
378 	list_splice_init(list, &work_list);
379 	spin_unlock_irq(&mdev->req_lock);
380 
381 	list_for_each_entry_safe(e, t, &work_list, w.list) {
382 		drbd_free_ee(mdev, e);
383 		count++;
384 	}
385 	return count;
386 }
387 
388 
389 /*
390  * This function is called from _asender only_
391  * but see also comments in _req_mod(,barrier_acked)
392  * and receive_Barrier.
393  *
394  * Move entries from net_ee to done_ee, if ready.
395  * Grab done_ee, call all callbacks, free the entries.
396  * The callbacks typically send out ACKs.
397  */
398 static int drbd_process_done_ee(struct drbd_conf *mdev)
399 {
400 	LIST_HEAD(work_list);
401 	LIST_HEAD(reclaimed);
402 	struct drbd_epoch_entry *e, *t;
403 	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
404 
405 	spin_lock_irq(&mdev->req_lock);
406 	reclaim_net_ee(mdev, &reclaimed);
407 	list_splice_init(&mdev->done_ee, &work_list);
408 	spin_unlock_irq(&mdev->req_lock);
409 
410 	list_for_each_entry_safe(e, t, &reclaimed, w.list)
411 		drbd_free_ee(mdev, e);
412 
413 	/* possible callbacks here:
414 	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
415 	 * all ignore the last argument.
416 	 */
417 	list_for_each_entry_safe(e, t, &work_list, w.list) {
418 		/* list_del not necessary, next/prev members not touched */
419 		ok = e->w.cb(mdev, &e->w, !ok) && ok;
420 		drbd_free_ee(mdev, e);
421 	}
422 	wake_up(&mdev->ee_wait);
423 
424 	return ok;
425 }
426 
427 void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
428 {
429 	DEFINE_WAIT(wait);
430 
431 	/* avoids spin_lock/unlock
432 	 * and calling prepare_to_wait in the fast path */
433 	while (!list_empty(head)) {
434 		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
435 		spin_unlock_irq(&mdev->req_lock);
436 		drbd_kick_lo(mdev);
437 		schedule();
438 		finish_wait(&mdev->ee_wait, &wait);
439 		spin_lock_irq(&mdev->req_lock);
440 	}
441 }
442 
443 void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
444 {
445 	spin_lock_irq(&mdev->req_lock);
446 	_drbd_wait_ee_list_empty(mdev, head);
447 	spin_unlock_irq(&mdev->req_lock);
448 }
449 
450 /* see also kernel_accept; which is only present since 2.6.18.
451  * also we want to log which part of it failed, exactly */
452 static int drbd_accept(struct drbd_conf *mdev, const char **what,
453 		struct socket *sock, struct socket **newsock)
454 {
455 	struct sock *sk = sock->sk;
456 	int err = 0;
457 
458 	*what = "listen";
459 	err = sock->ops->listen(sock, 5);
460 	if (err < 0)
461 		goto out;
462 
463 	*what = "sock_create_lite";
464 	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
465 			       newsock);
466 	if (err < 0)
467 		goto out;
468 
469 	*what = "accept";
470 	err = sock->ops->accept(sock, *newsock, 0);
471 	if (err < 0) {
472 		sock_release(*newsock);
473 		*newsock = NULL;
474 		goto out;
475 	}
476 	(*newsock)->ops  = sock->ops;
477 
478 out:
479 	return err;
480 }
481 
482 static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
483 		    void *buf, size_t size, int flags)
484 {
485 	mm_segment_t oldfs;
486 	struct kvec iov = {
487 		.iov_base = buf,
488 		.iov_len = size,
489 	};
490 	struct msghdr msg = {
491 		.msg_iovlen = 1,
492 		.msg_iov = (struct iovec *)&iov,
493 		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
494 	};
495 	int rv;
496 
497 	oldfs = get_fs();
498 	set_fs(KERNEL_DS);
499 	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
500 	set_fs(oldfs);
501 
502 	return rv;
503 }
504 
505 static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
506 {
507 	mm_segment_t oldfs;
508 	struct kvec iov = {
509 		.iov_base = buf,
510 		.iov_len = size,
511 	};
512 	struct msghdr msg = {
513 		.msg_iovlen = 1,
514 		.msg_iov = (struct iovec *)&iov,
515 		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
516 	};
517 	int rv;
518 
519 	oldfs = get_fs();
520 	set_fs(KERNEL_DS);
521 
522 	for (;;) {
523 		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
524 		if (rv == size)
525 			break;
526 
527 		/* Note:
528 		 * ECONNRESET	other side closed the connection
529 		 * ERESTARTSYS	(on  sock) we got a signal
530 		 */
531 
532 		if (rv < 0) {
533 			if (rv == -ECONNRESET)
534 				dev_info(DEV, "sock was reset by peer\n");
535 			else if (rv != -ERESTARTSYS)
536 				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
537 			break;
538 		} else if (rv == 0) {
539 			dev_info(DEV, "sock was shut down by peer\n");
540 			break;
541 		} else	{
542 			/* signal came in, or peer/link went down,
543 			 * after we read a partial message
544 			 */
545 			/* D_ASSERT(signal_pending(current)); */
546 			break;
547 		}
548 	};
549 
550 	set_fs(oldfs);
551 
552 	if (rv != size)
553 		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
554 
555 	return rv;
556 }
557 
558 static struct socket *drbd_try_connect(struct drbd_conf *mdev)
559 {
560 	const char *what;
561 	struct socket *sock;
562 	struct sockaddr_in6 src_in6;
563 	int err;
564 	int disconnect_on_error = 1;
565 
566 	if (!get_net_conf(mdev))
567 		return NULL;
568 
569 	what = "sock_create_kern";
570 	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
571 		SOCK_STREAM, IPPROTO_TCP, &sock);
572 	if (err < 0) {
573 		sock = NULL;
574 		goto out;
575 	}
576 
577 	sock->sk->sk_rcvtimeo =
578 	sock->sk->sk_sndtimeo =  mdev->net_conf->try_connect_int*HZ;
579 
580        /* explicitly bind to the configured IP as source IP
581 	*  for the outgoing connections.
582 	*  This is needed for multihomed hosts and to be
583 	*  able to use lo: interfaces for drbd.
584 	* Make sure to use 0 as port number, so linux selects
585 	*  a free one dynamically.
586 	*/
587 	memcpy(&src_in6, mdev->net_conf->my_addr,
588 	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
589 	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
590 		src_in6.sin6_port = 0;
591 	else
592 		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
593 
594 	what = "bind before connect";
595 	err = sock->ops->bind(sock,
596 			      (struct sockaddr *) &src_in6,
597 			      mdev->net_conf->my_addr_len);
598 	if (err < 0)
599 		goto out;
600 
601 	/* connect may fail, peer not yet available.
602 	 * stay C_WF_CONNECTION, don't go Disconnecting! */
603 	disconnect_on_error = 0;
604 	what = "connect";
605 	err = sock->ops->connect(sock,
606 				 (struct sockaddr *)mdev->net_conf->peer_addr,
607 				 mdev->net_conf->peer_addr_len, 0);
608 
609 out:
610 	if (err < 0) {
611 		if (sock) {
612 			sock_release(sock);
613 			sock = NULL;
614 		}
615 		switch (-err) {
616 			/* timeout, busy, signal pending */
617 		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
618 		case EINTR: case ERESTARTSYS:
619 			/* peer not (yet) available, network problem */
620 		case ECONNREFUSED: case ENETUNREACH:
621 		case EHOSTDOWN:    case EHOSTUNREACH:
622 			disconnect_on_error = 0;
623 			break;
624 		default:
625 			dev_err(DEV, "%s failed, err = %d\n", what, err);
626 		}
627 		if (disconnect_on_error)
628 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
629 	}
630 	put_net_conf(mdev);
631 	return sock;
632 }
633 
634 static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
635 {
636 	int timeo, err;
637 	struct socket *s_estab = NULL, *s_listen;
638 	const char *what;
639 
640 	if (!get_net_conf(mdev))
641 		return NULL;
642 
643 	what = "sock_create_kern";
644 	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
645 		SOCK_STREAM, IPPROTO_TCP, &s_listen);
646 	if (err) {
647 		s_listen = NULL;
648 		goto out;
649 	}
650 
651 	timeo = mdev->net_conf->try_connect_int * HZ;
652 	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
653 
654 	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
655 	s_listen->sk->sk_rcvtimeo = timeo;
656 	s_listen->sk->sk_sndtimeo = timeo;
657 
658 	what = "bind before listen";
659 	err = s_listen->ops->bind(s_listen,
660 			      (struct sockaddr *) mdev->net_conf->my_addr,
661 			      mdev->net_conf->my_addr_len);
662 	if (err < 0)
663 		goto out;
664 
665 	err = drbd_accept(mdev, &what, s_listen, &s_estab);
666 
667 out:
668 	if (s_listen)
669 		sock_release(s_listen);
670 	if (err < 0) {
671 		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
672 			dev_err(DEV, "%s failed, err = %d\n", what, err);
673 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
674 		}
675 	}
676 	put_net_conf(mdev);
677 
678 	return s_estab;
679 }
680 
681 static int drbd_send_fp(struct drbd_conf *mdev,
682 	struct socket *sock, enum drbd_packets cmd)
683 {
684 	struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
685 
686 	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
687 }
688 
689 static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
690 {
691 	struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
692 	int rr;
693 
694 	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
695 
696 	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
697 		return be16_to_cpu(h->command);
698 
699 	return 0xffff;
700 }
701 
702 /**
703  * drbd_socket_okay() - Free the socket if its connection is not okay
704  * @mdev:	DRBD device.
705  * @sock:	pointer to the pointer to the socket.
706  */
707 static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
708 {
709 	int rr;
710 	char tb[4];
711 
712 	if (!*sock)
713 		return FALSE;
714 
715 	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
716 
717 	if (rr > 0 || rr == -EAGAIN) {
718 		return TRUE;
719 	} else {
720 		sock_release(*sock);
721 		*sock = NULL;
722 		return FALSE;
723 	}
724 }
725 
726 /*
727  * return values:
728  *   1 yes, we have a valid connection
729  *   0 oops, did not work out, please try again
730  *  -1 peer talks different language,
731  *     no point in trying again, please go standalone.
732  *  -2 We do not have a network config...
733  */
734 static int drbd_connect(struct drbd_conf *mdev)
735 {
736 	struct socket *s, *sock, *msock;
737 	int try, h, ok;
738 
739 	D_ASSERT(!mdev->data.socket);
740 
741 	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags))
742 		dev_err(DEV, "CREATE_BARRIER flag was set in drbd_connect - now cleared!\n");
743 
744 	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
745 		return -2;
746 
747 	clear_bit(DISCARD_CONCURRENT, &mdev->flags);
748 
749 	sock  = NULL;
750 	msock = NULL;
751 
752 	do {
753 		for (try = 0;;) {
754 			/* 3 tries, this should take less than a second! */
755 			s = drbd_try_connect(mdev);
756 			if (s || ++try >= 3)
757 				break;
758 			/* give the other side time to call bind() & listen() */
759 			__set_current_state(TASK_INTERRUPTIBLE);
760 			schedule_timeout(HZ / 10);
761 		}
762 
763 		if (s) {
764 			if (!sock) {
765 				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
766 				sock = s;
767 				s = NULL;
768 			} else if (!msock) {
769 				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
770 				msock = s;
771 				s = NULL;
772 			} else {
773 				dev_err(DEV, "Logic error in drbd_connect()\n");
774 				goto out_release_sockets;
775 			}
776 		}
777 
778 		if (sock && msock) {
779 			__set_current_state(TASK_INTERRUPTIBLE);
780 			schedule_timeout(HZ / 10);
781 			ok = drbd_socket_okay(mdev, &sock);
782 			ok = drbd_socket_okay(mdev, &msock) && ok;
783 			if (ok)
784 				break;
785 		}
786 
787 retry:
788 		s = drbd_wait_for_connect(mdev);
789 		if (s) {
790 			try = drbd_recv_fp(mdev, s);
791 			drbd_socket_okay(mdev, &sock);
792 			drbd_socket_okay(mdev, &msock);
793 			switch (try) {
794 			case P_HAND_SHAKE_S:
795 				if (sock) {
796 					dev_warn(DEV, "initial packet S crossed\n");
797 					sock_release(sock);
798 				}
799 				sock = s;
800 				break;
801 			case P_HAND_SHAKE_M:
802 				if (msock) {
803 					dev_warn(DEV, "initial packet M crossed\n");
804 					sock_release(msock);
805 				}
806 				msock = s;
807 				set_bit(DISCARD_CONCURRENT, &mdev->flags);
808 				break;
809 			default:
810 				dev_warn(DEV, "Error receiving initial packet\n");
811 				sock_release(s);
812 				if (random32() & 1)
813 					goto retry;
814 			}
815 		}
816 
817 		if (mdev->state.conn <= C_DISCONNECTING)
818 			goto out_release_sockets;
819 		if (signal_pending(current)) {
820 			flush_signals(current);
821 			smp_rmb();
822 			if (get_t_state(&mdev->receiver) == Exiting)
823 				goto out_release_sockets;
824 		}
825 
826 		if (sock && msock) {
827 			ok = drbd_socket_okay(mdev, &sock);
828 			ok = drbd_socket_okay(mdev, &msock) && ok;
829 			if (ok)
830 				break;
831 		}
832 	} while (1);
833 
834 	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
835 	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */
836 
837 	sock->sk->sk_allocation = GFP_NOIO;
838 	msock->sk->sk_allocation = GFP_NOIO;
839 
840 	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
841 	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
842 
843 	if (mdev->net_conf->sndbuf_size) {
844 		sock->sk->sk_sndbuf = mdev->net_conf->sndbuf_size;
845 		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
846 	}
847 
848 	if (mdev->net_conf->rcvbuf_size) {
849 		sock->sk->sk_rcvbuf = mdev->net_conf->rcvbuf_size;
850 		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
851 	}
852 
853 	/* NOT YET ...
854 	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
855 	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
856 	 * first set it to the P_HAND_SHAKE timeout,
857 	 * which we set to 4x the configured ping_timeout. */
858 	sock->sk->sk_sndtimeo =
859 	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
860 
861 	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
862 	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
863 
864 	/* we don't want delays.
865 	 * we use TCP_CORK where appropriate, though */
866 	drbd_tcp_nodelay(sock);
867 	drbd_tcp_nodelay(msock);
868 
869 	mdev->data.socket = sock;
870 	mdev->meta.socket = msock;
871 	mdev->last_received = jiffies;
872 
873 	D_ASSERT(mdev->asender.task == NULL);
874 
875 	h = drbd_do_handshake(mdev);
876 	if (h <= 0)
877 		return h;
878 
879 	if (mdev->cram_hmac_tfm) {
880 		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
881 		switch (drbd_do_auth(mdev)) {
882 		case -1:
883 			dev_err(DEV, "Authentication of peer failed\n");
884 			return -1;
885 		case 0:
886 			dev_err(DEV, "Authentication of peer failed, trying again.\n");
887 			return 0;
888 		}
889 	}
890 
891 	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
892 		return 0;
893 
894 	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
895 	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
896 
897 	atomic_set(&mdev->packet_seq, 0);
898 	mdev->peer_seq = 0;
899 
900 	drbd_thread_start(&mdev->asender);
901 
902 	drbd_send_protocol(mdev);
903 	drbd_send_sync_param(mdev, &mdev->sync_conf);
904 	drbd_send_sizes(mdev, 0);
905 	drbd_send_uuids(mdev);
906 	drbd_send_state(mdev);
907 	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
908 	clear_bit(RESIZE_PENDING, &mdev->flags);
909 
910 	return 1;
911 
912 out_release_sockets:
913 	if (sock)
914 		sock_release(sock);
915 	if (msock)
916 		sock_release(msock);
917 	return -1;
918 }
919 
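/*
 * Illustrative sketch, not the actual caller (the receiver thread's main
 * function elsewhere in this file does this for real): how the return codes
 * documented above drbd_connect() are meant to be handled.
 */
static int example_connect_loop(struct drbd_conf *mdev)
{
	int h;

	do {
		h = drbd_connect(mdev);
		if (h == 0)	/* did not work out, back off and retry */
			schedule_timeout_interruptible(HZ);
	} while (h == 0);

	/* h == 1: connected; h < 0: incompatible peer or no net config,
	 * go standalone instead of retrying. */
	return h;
}
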
920 static int drbd_recv_header(struct drbd_conf *mdev, struct p_header *h)
921 {
922 	int r;
923 
924 	r = drbd_recv(mdev, h, sizeof(*h));
925 
926 	if (unlikely(r != sizeof(*h))) {
927 		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
928 		return FALSE;
929 	};
930 	h->command = be16_to_cpu(h->command);
931 	h->length  = be16_to_cpu(h->length);
932 	if (unlikely(h->magic != BE_DRBD_MAGIC)) {
933 		dev_err(DEV, "magic?? on data m: 0x%lx c: %d l: %d\n",
934 		    (long)be32_to_cpu(h->magic),
935 		    h->command, h->length);
936 		return FALSE;
937 	}
938 	mdev->last_received = jiffies;
939 
940 	return TRUE;
941 }
942 
943 static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
944 {
945 	int rv;
946 
947 	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
948 		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, NULL);
949 		if (rv) {
950 			dev_err(DEV, "local disk flush failed with status %d\n", rv);
951 			/* would rather check on EOPNOTSUPP, but that is not reliable.
952 			 * don't try again for ANY return value != 0
953 			 * if (rv == -EOPNOTSUPP) */
954 			drbd_bump_write_ordering(mdev, WO_drain_io);
955 		}
956 		put_ldev(mdev);
957 	}
958 
959 	return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
960 }
961 
962 static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
963 {
964 	struct flush_work *fw = (struct flush_work *)w;
965 	struct drbd_epoch *epoch = fw->epoch;
966 
967 	kfree(w);
968 
969 	if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
970 		drbd_flush_after_epoch(mdev, epoch);
971 
972 	drbd_may_finish_epoch(mdev, epoch, EV_PUT |
973 			      (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
974 
975 	return 1;
976 }
977 
978 /**
979  * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
980  * @mdev:	DRBD device.
981  * @epoch:	Epoch object.
982  * @ev:		Epoch event.
983  */
984 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
985 					       struct drbd_epoch *epoch,
986 					       enum epoch_event ev)
987 {
988 	int finish, epoch_size;
989 	struct drbd_epoch *next_epoch;
990 	int schedule_flush = 0;
991 	enum finish_epoch rv = FE_STILL_LIVE;
992 
993 	spin_lock(&mdev->epoch_lock);
994 	do {
995 		next_epoch = NULL;
996 		finish = 0;
997 
998 		epoch_size = atomic_read(&epoch->epoch_size);
999 
1000 		switch (ev & ~EV_CLEANUP) {
1001 		case EV_PUT:
1002 			atomic_dec(&epoch->active);
1003 			break;
1004 		case EV_GOT_BARRIER_NR:
1005 			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1006 
1007 			/* Special case: If we just switched from WO_bio_barrier to
1008 			   WO_bdev_flush we should not finish the current epoch */
1009 			if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
1010 			    mdev->write_ordering != WO_bio_barrier &&
1011 			    epoch == mdev->current_epoch)
1012 				clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
1013 			break;
1014 		case EV_BARRIER_DONE:
1015 			set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
1016 			break;
1017 		case EV_BECAME_LAST:
1018 			/* nothing to do */
1019 			break;
1020 		}
1021 
1022 		if (epoch_size != 0 &&
1023 		    atomic_read(&epoch->active) == 0 &&
1024 		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
1025 		    epoch->list.prev == &mdev->current_epoch->list &&
1026 		    !test_bit(DE_IS_FINISHING, &epoch->flags)) {
1027 			/* Nearly all conditions are met to finish that epoch... */
1028 			if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
1029 			    mdev->write_ordering == WO_none ||
1030 			    (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
1031 			    ev & EV_CLEANUP) {
1032 				finish = 1;
1033 				set_bit(DE_IS_FINISHING, &epoch->flags);
1034 			} else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
1035 				 mdev->write_ordering == WO_bio_barrier) {
1036 				atomic_inc(&epoch->active);
1037 				schedule_flush = 1;
1038 			}
1039 		}
1040 		if (finish) {
1041 			if (!(ev & EV_CLEANUP)) {
1042 				spin_unlock(&mdev->epoch_lock);
1043 				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
1044 				spin_lock(&mdev->epoch_lock);
1045 			}
1046 			dec_unacked(mdev);
1047 
1048 			if (mdev->current_epoch != epoch) {
1049 				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1050 				list_del(&epoch->list);
1051 				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1052 				mdev->epochs--;
1053 				kfree(epoch);
1054 
1055 				if (rv == FE_STILL_LIVE)
1056 					rv = FE_DESTROYED;
1057 			} else {
1058 				epoch->flags = 0;
1059 				atomic_set(&epoch->epoch_size, 0);
1060 				/* atomic_set(&epoch->active, 0); is already zero */
1061 				if (rv == FE_STILL_LIVE)
1062 					rv = FE_RECYCLED;
1063 			}
1064 		}
1065 
1066 		if (!next_epoch)
1067 			break;
1068 
1069 		epoch = next_epoch;
1070 	} while (1);
1071 
1072 	spin_unlock(&mdev->epoch_lock);
1073 
1074 	if (schedule_flush) {
1075 		struct flush_work *fw;
1076 		fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
1077 		if (fw) {
1078 			fw->w.cb = w_flush;
1079 			fw->epoch = epoch;
1080 			drbd_queue_work(&mdev->data.work, &fw->w);
1081 		} else {
1082 			dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
1083 			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1084 			/* That is not a recursion, only one level */
1085 			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
1086 			drbd_may_finish_epoch(mdev, epoch, EV_PUT);
1087 		}
1088 	}
1089 
1090 	return rv;
1091 }
1092 
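/*
 * Editor's sketch, not part of the original source: the epoch-finish test
 * from drbd_may_finish_epoch() above, pulled out for readability.  A caller
 * would have to hold mdev->epoch_lock, as the original does.
 */
static inline int example_epoch_finishable(struct drbd_conf *mdev,
					   struct drbd_epoch *epoch)
{
	return atomic_read(&epoch->epoch_size) != 0 &&
	       atomic_read(&epoch->active) == 0 &&
	       test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
	       epoch->list.prev == &mdev->current_epoch->list &&
	       !test_bit(DE_IS_FINISHING, &epoch->flags);
}
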
1093 /**
1094  * drbd_bump_write_ordering() - Fall back to another write ordering method
1095  * @mdev:	DRBD device.
1096  * @wo:		Write ordering method to try.
1097  */
1098 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
1099 {
1100 	enum write_ordering_e pwo;
1101 	static char *write_ordering_str[] = {
1102 		[WO_none] = "none",
1103 		[WO_drain_io] = "drain",
1104 		[WO_bdev_flush] = "flush",
1105 		[WO_bio_barrier] = "barrier",
1106 	};
1107 
1108 	pwo = mdev->write_ordering;
1109 	wo = min(pwo, wo);
1110 	if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
1111 		wo = WO_bdev_flush;
1112 	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
1113 		wo = WO_drain_io;
1114 	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
1115 		wo = WO_none;
1116 	mdev->write_ordering = wo;
1117 	if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
1118 		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
1119 }
1120 
1121 /**
1122  * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
1123  * @mdev:	DRBD device.
1124  * @w:		work object.
1125  * @cancel:	The connection will be closed anyways (unused in this callback)
1126  */
1127 int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
1128 {
1129 	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1130 	struct bio *bio = e->private_bio;
1131 
1132 	/* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
1133 	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
1134 	   so that we can finish that epoch in drbd_may_finish_epoch().
1135 	   That is necessary if we already have a long chain of Epochs, before
1136 	   we realize that BIO_RW_BARRIER is actually not supported */
1137 
1138 	/* As long as the -ENOTSUPP on the barrier is reported immediately
1139 	   that will never trigger. If it is reported late, we will just
1140 	   print that warning and continue correctly for all future requests
1141 	   with WO_bdev_flush */
1142 	if (previous_epoch(mdev, e->epoch))
1143 		dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
1144 
1145 	/* prepare bio for re-submit,
1146 	 * re-init volatile members */
1147 	/* we still have a local reference,
1148 	 * get_ldev was done in receive_Data. */
1149 	bio->bi_bdev = mdev->ldev->backing_bdev;
1150 	bio->bi_sector = e->sector;
1151 	bio->bi_size = e->size;
1152 	bio->bi_idx = 0;
1153 
1154 	bio->bi_flags &= ~(BIO_POOL_MASK - 1);
1155 	bio->bi_flags |= 1 << BIO_UPTODATE;
1156 
1157 	/* don't know whether this is necessary: */
1158 	bio->bi_phys_segments = 0;
1159 	bio->bi_next = NULL;
1160 
1161 	/* these should be unchanged: */
1162 	/* bio->bi_end_io = drbd_endio_write_sec; */
1163 	/* bio->bi_vcnt = whatever; */
1164 
1165 	e->w.cb = e_end_block;
1166 
1167 	/* This is no longer a barrier request. */
1168 	bio->bi_rw &= ~(1UL << BIO_RW_BARRIER);
1169 
1170 	drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, bio);
1171 
1172 	return 1;
1173 }
1174 
1175 static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
1176 {
1177 	int rv, issue_flush;
1178 	struct p_barrier *p = (struct p_barrier *)h;
1179 	struct drbd_epoch *epoch;
1180 
1181 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
1182 
1183 	rv = drbd_recv(mdev, h->payload, h->length);
1184 	ERR_IF(rv != h->length) return FALSE;
1185 
1186 	inc_unacked(mdev);
1187 
1188 	if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
1189 		drbd_kick_lo(mdev);
1190 
1191 	mdev->current_epoch->barrier_nr = p->barrier;
1192 	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
1193 
1194 	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1195 	 * the activity log, which means it would not be resynced in case the
1196 	 * R_PRIMARY crashes now.
1197 	 * Therefore we must send the barrier_ack after the barrier request was
1198 	 * completed. */
1199 	switch (mdev->write_ordering) {
1200 	case WO_bio_barrier:
1201 	case WO_none:
1202 		if (rv == FE_RECYCLED)
1203 			return TRUE;
1204 		break;
1205 
1206 	case WO_bdev_flush:
1207 	case WO_drain_io:
1208 		if (rv == FE_STILL_LIVE) {
1209 			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1210 			drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1211 			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1212 		}
1213 		if (rv == FE_RECYCLED)
1214 			return TRUE;
1215 
1216 		/* The asender will send all the ACKs and barrier ACKs out, since
1217 		   all EEs moved from the active_ee to the done_ee. We need to
1218 		   provide a new epoch object for the EEs that come in soon */
1219 		break;
1220 	}
1221 
1222 	/* receiver context, in the writeout path of the other node.
1223 	 * avoid potential distributed deadlock */
1224 	epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1225 	if (!epoch) {
1226 		dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1227 		issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1228 		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1229 		if (issue_flush) {
1230 			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1231 			if (rv == FE_RECYCLED)
1232 				return TRUE;
1233 		}
1234 
1235 		drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
1236 
1237 		return TRUE;
1238 	}
1239 
1240 	epoch->flags = 0;
1241 	atomic_set(&epoch->epoch_size, 0);
1242 	atomic_set(&epoch->active, 0);
1243 
1244 	spin_lock(&mdev->epoch_lock);
1245 	if (atomic_read(&mdev->current_epoch->epoch_size)) {
1246 		list_add(&epoch->list, &mdev->current_epoch->list);
1247 		mdev->current_epoch = epoch;
1248 		mdev->epochs++;
1249 	} else {
1250 		/* The current_epoch got recycled while we allocated this one... */
1251 		kfree(epoch);
1252 	}
1253 	spin_unlock(&mdev->epoch_lock);
1254 
1255 	return TRUE;
1256 }
1257 
1258 /* used from receive_RSDataReply (recv_resync_read)
1259  * and from receive_Data */
1260 static struct drbd_epoch_entry *
1261 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
1262 {
1263 	struct drbd_epoch_entry *e;
1264 	struct bio_vec *bvec;
1265 	struct page *page;
1266 	struct bio *bio;
1267 	int dgs, ds, i, rr;
1268 	void *dig_in = mdev->int_dig_in;
1269 	void *dig_vv = mdev->int_dig_vv;
1270 
1271 	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1272 		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1273 
1274 	if (dgs) {
1275 		rr = drbd_recv(mdev, dig_in, dgs);
1276 		if (rr != dgs) {
1277 			dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
1278 			     rr, dgs);
1279 			return NULL;
1280 		}
1281 	}
1282 
1283 	data_size -= dgs;
1284 
1285 	ERR_IF(data_size &  0x1ff) return NULL;
1286 	ERR_IF(data_size >  DRBD_MAX_SEGMENT_SIZE) return NULL;
1287 
1288 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1289 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
1290 	 * which in turn might block on the other node at this very place.  */
1291 	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
1292 	if (!e)
1293 		return NULL;
1294 	bio = e->private_bio;
1295 	ds = data_size;
1296 	bio_for_each_segment(bvec, bio, i) {
1297 		page = bvec->bv_page;
1298 		rr = drbd_recv(mdev, kmap(page), min_t(int, ds, PAGE_SIZE));
1299 		kunmap(page);
1300 		if (rr != min_t(int, ds, PAGE_SIZE)) {
1301 			drbd_free_ee(mdev, e);
1302 			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1303 			     rr, min_t(int, ds, PAGE_SIZE));
1304 			return NULL;
1305 		}
1306 		ds -= rr;
1307 	}
1308 
1309 	if (dgs) {
1310 		drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
1311 		if (memcmp(dig_in, dig_vv, dgs)) {
1312 			dev_err(DEV, "Digest integrity check FAILED.\n");
1313 			drbd_bcast_ee(mdev, "digest failed",
1314 					dgs, dig_in, dig_vv, e);
1315 			drbd_free_ee(mdev, e);
1316 			return NULL;
1317 		}
1318 	}
1319 	mdev->recv_cnt += data_size>>9;
1320 	return e;
1321 }
1322 
1323 /* drbd_drain_block() just takes a data block
1324  * out of the socket input buffer, and discards it.
1325  */
1326 static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1327 {
1328 	struct page *page;
1329 	int rr, rv = 1;
1330 	void *data;
1331 
1332 	page = drbd_pp_alloc(mdev, 1);
1333 
1334 	data = kmap(page);
1335 	while (data_size) {
1336 		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
1337 		if (rr != min_t(int, data_size, PAGE_SIZE)) {
1338 			rv = 0;
1339 			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1340 			     rr, min_t(int, data_size, PAGE_SIZE));
1341 			break;
1342 		}
1343 		data_size -= rr;
1344 	}
1345 	kunmap(page);
1346 	drbd_pp_free(mdev, page);
1347 	return rv;
1348 }
1349 
1350 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1351 			   sector_t sector, int data_size)
1352 {
1353 	struct bio_vec *bvec;
1354 	struct bio *bio;
1355 	int dgs, rr, i, expect;
1356 	void *dig_in = mdev->int_dig_in;
1357 	void *dig_vv = mdev->int_dig_vv;
1358 
1359 	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1360 		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1361 
1362 	if (dgs) {
1363 		rr = drbd_recv(mdev, dig_in, dgs);
1364 		if (rr != dgs) {
1365 			dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
1366 			     rr, dgs);
1367 			return 0;
1368 		}
1369 	}
1370 
1371 	data_size -= dgs;
1372 
1373 	/* optimistically update recv_cnt.  if receiving fails below,
1374 	 * we disconnect anyways, and counters will be reset. */
1375 	mdev->recv_cnt += data_size>>9;
1376 
1377 	bio = req->master_bio;
1378 	D_ASSERT(sector == bio->bi_sector);
1379 
1380 	bio_for_each_segment(bvec, bio, i) {
1381 		expect = min_t(int, data_size, bvec->bv_len);
1382 		rr = drbd_recv(mdev,
1383 			     kmap(bvec->bv_page)+bvec->bv_offset,
1384 			     expect);
1385 		kunmap(bvec->bv_page);
1386 		if (rr != expect) {
1387 			dev_warn(DEV, "short read receiving data reply: "
1388 			     "read %d expected %d\n",
1389 			     rr, expect);
1390 			return 0;
1391 		}
1392 		data_size -= rr;
1393 	}
1394 
1395 	if (dgs) {
1396 		drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
1397 		if (memcmp(dig_in, dig_vv, dgs)) {
1398 			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1399 			return 0;
1400 		}
1401 	}
1402 
1403 	D_ASSERT(data_size == 0);
1404 	return 1;
1405 }
1406 
1407 /* e_end_resync_block() is called via
1408  * drbd_process_done_ee() by asender only */
1409 static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1410 {
1411 	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1412 	sector_t sector = e->sector;
1413 	int ok;
1414 
1415 	D_ASSERT(hlist_unhashed(&e->colision));
1416 
1417 	if (likely(drbd_bio_uptodate(e->private_bio))) {
1418 		drbd_set_in_sync(mdev, sector, e->size);
1419 		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
1420 	} else {
1421 		/* Record failure to sync */
1422 		drbd_rs_failed_io(mdev, sector, e->size);
1423 
1424 		ok  = drbd_send_ack(mdev, P_NEG_ACK, e);
1425 	}
1426 	dec_unacked(mdev);
1427 
1428 	return ok;
1429 }
1430 
1431 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1432 {
1433 	struct drbd_epoch_entry *e;
1434 
1435 	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
1436 	if (!e) {
1437 		put_ldev(mdev);
1438 		return FALSE;
1439 	}
1440 
1441 	dec_rs_pending(mdev);
1442 
1443 	e->private_bio->bi_end_io = drbd_endio_write_sec;
1444 	e->private_bio->bi_rw = WRITE;
1445 	e->w.cb = e_end_resync_block;
1446 
1447 	inc_unacked(mdev);
1448 	/* corresponding dec_unacked() in e_end_resync_block()
1449 	 * respective _drbd_clear_done_ee */
1450 
1451 	spin_lock_irq(&mdev->req_lock);
1452 	list_add(&e->w.list, &mdev->sync_ee);
1453 	spin_unlock_irq(&mdev->req_lock);
1454 
1455 	drbd_generic_make_request(mdev, DRBD_FAULT_RS_WR, e->private_bio);
1456 	/* accounting done in endio */
1457 
1458 	maybe_kick_lo(mdev);
1459 	return TRUE;
1460 }
1461 
1462 static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h)
1463 {
1464 	struct drbd_request *req;
1465 	sector_t sector;
1466 	unsigned int header_size, data_size;
1467 	int ok;
1468 	struct p_data *p = (struct p_data *)h;
1469 
1470 	header_size = sizeof(*p) - sizeof(*h);
1471 	data_size   = h->length  - header_size;
1472 
1473 	ERR_IF(data_size == 0) return FALSE;
1474 
1475 	if (drbd_recv(mdev, h->payload, header_size) != header_size)
1476 		return FALSE;
1477 
1478 	sector = be64_to_cpu(p->sector);
1479 
1480 	spin_lock_irq(&mdev->req_lock);
1481 	req = _ar_id_to_req(mdev, p->block_id, sector);
1482 	spin_unlock_irq(&mdev->req_lock);
1483 	if (unlikely(!req)) {
1484 		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
1485 		return FALSE;
1486 	}
1487 
1488 	/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
1489 	 * special casing it there for the various failure cases.
1490 	 * still no race with drbd_fail_pending_reads */
1491 	ok = recv_dless_read(mdev, req, sector, data_size);
1492 
1493 	if (ok)
1494 		req_mod(req, data_received);
1495 	/* else: nothing. handled from drbd_disconnect...
1496 	 * I don't think we may complete this just yet
1497 	 * in case we are "on-disconnect: freeze" */
1498 
1499 	return ok;
1500 }
1501 
1502 static int receive_RSDataReply(struct drbd_conf *mdev, struct p_header *h)
1503 {
1504 	sector_t sector;
1505 	unsigned int header_size, data_size;
1506 	int ok;
1507 	struct p_data *p = (struct p_data *)h;
1508 
1509 	header_size = sizeof(*p) - sizeof(*h);
1510 	data_size   = h->length  - header_size;
1511 
1512 	ERR_IF(data_size == 0) return FALSE;
1513 
1514 	if (drbd_recv(mdev, h->payload, header_size) != header_size)
1515 		return FALSE;
1516 
1517 	sector = be64_to_cpu(p->sector);
1518 	D_ASSERT(p->block_id == ID_SYNCER);
1519 
1520 	if (get_ldev(mdev)) {
1521 		/* data is submitted to disk within recv_resync_read.
1522 		 * corresponding put_ldev done below on error,
1523 		 * or in drbd_endio_write_sec. */
1524 		ok = recv_resync_read(mdev, sector, data_size);
1525 	} else {
1526 		if (__ratelimit(&drbd_ratelimit_state))
1527 			dev_err(DEV, "Can not write resync data to local disk.\n");
1528 
1529 		ok = drbd_drain_block(mdev, data_size);
1530 
1531 		drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1532 	}
1533 
1534 	return ok;
1535 }
1536 
1537 /* e_end_block() is called via drbd_process_done_ee().
1538  * this means this function only runs in the asender thread
1539  */
1540 static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1541 {
1542 	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1543 	sector_t sector = e->sector;
1544 	struct drbd_epoch *epoch;
1545 	int ok = 1, pcmd;
1546 
1547 	if (e->flags & EE_IS_BARRIER) {
1548 		epoch = previous_epoch(mdev, e->epoch);
1549 		if (epoch)
1550 			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
1551 	}
1552 
1553 	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
1554 		if (likely(drbd_bio_uptodate(e->private_bio))) {
1555 			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1556 				mdev->state.conn <= C_PAUSED_SYNC_T &&
1557 				e->flags & EE_MAY_SET_IN_SYNC) ?
1558 				P_RS_WRITE_ACK : P_WRITE_ACK;
1559 			ok &= drbd_send_ack(mdev, pcmd, e);
1560 			if (pcmd == P_RS_WRITE_ACK)
1561 				drbd_set_in_sync(mdev, sector, e->size);
1562 		} else {
1563 			ok  = drbd_send_ack(mdev, P_NEG_ACK, e);
1564 			/* we expect it to be marked out of sync anyways...
1565 			 * maybe assert this?  */
1566 		}
1567 		dec_unacked(mdev);
1568 	}
1569 	/* we delete from the conflict detection hash _after_ we sent out the
1570 	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right.  */
1571 	if (mdev->net_conf->two_primaries) {
1572 		spin_lock_irq(&mdev->req_lock);
1573 		D_ASSERT(!hlist_unhashed(&e->colision));
1574 		hlist_del_init(&e->colision);
1575 		spin_unlock_irq(&mdev->req_lock);
1576 	} else {
1577 		D_ASSERT(hlist_unhashed(&e->colision));
1578 	}
1579 
1580 	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1581 
1582 	return ok;
1583 }
1584 
1585 static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1586 {
1587 	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1588 	int ok = 1;
1589 
1590 	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1591 	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1592 
1593 	spin_lock_irq(&mdev->req_lock);
1594 	D_ASSERT(!hlist_unhashed(&e->colision));
1595 	hlist_del_init(&e->colision);
1596 	spin_unlock_irq(&mdev->req_lock);
1597 
1598 	dec_unacked(mdev);
1599 
1600 	return ok;
1601 }
1602 
1603 /* Called from receive_Data.
1604  * Synchronize packets on sock with packets on msock.
1605  *
1606  * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1607  * packet traveling on msock, they are still processed in the order they have
1608  * been sent.
1609  *
1610  * Note: we don't care for Ack packets overtaking P_DATA packets.
1611  *
1612  * In case packet_seq is larger than mdev->peer_seq number, there are
1613  * outstanding packets on the msock. We wait for them to arrive.
1614  * In case we are the logically next packet, we update mdev->peer_seq
1615  * ourselves. Correctly handles 32bit wrap around.
1616  *
1617  * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1618  * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1619  * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1620  * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
1621  *
1622  * returns 0 if we may process the packet,
1623  * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1624 static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1625 {
1626 	DEFINE_WAIT(wait);
1627 	unsigned int p_seq;
1628 	long timeout;
1629 	int ret = 0;
1630 	spin_lock(&mdev->peer_seq_lock);
1631 	for (;;) {
1632 		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1633 		if (seq_le(packet_seq, mdev->peer_seq+1))
1634 			break;
1635 		if (signal_pending(current)) {
1636 			ret = -ERESTARTSYS;
1637 			break;
1638 		}
1639 		p_seq = mdev->peer_seq;
1640 		spin_unlock(&mdev->peer_seq_lock);
1641 		timeout = schedule_timeout(30*HZ);
1642 		spin_lock(&mdev->peer_seq_lock);
1643 		if (timeout == 0 && p_seq == mdev->peer_seq) {
1644 			ret = -ETIMEDOUT;
1645 			dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1646 			break;
1647 		}
1648 	}
1649 	finish_wait(&mdev->seq_wait, &wait);
1650 	if (mdev->peer_seq+1 == packet_seq)
1651 		mdev->peer_seq++;
1652 	spin_unlock(&mdev->peer_seq_lock);
1653 	return ret;
1654 }
1655 
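/*
 * Illustrative sketch (an editor's assumption, not drbd's actual seq_le()
 * implementation): wrap-around safe sequence comparison as relied upon by
 * drbd_wait_peer_seq() above.  Interpreting the unsigned difference as a
 * signed 32bit value gives the right answer as long as the two sequence
 * numbers are less than 2^31 apart, which the comment above estimates at
 * hundreds of seconds even on a 10 GBit link.
 * Example: example_seq_le(0xfffffffb, 5) is true, i.e. 0xfffffffb counts
 * as "not after" 5 across the 32bit wrap.
 */
static inline int example_seq_le(u32 a, u32 b)
{
	return (s32)(a - b) <= 0;
}
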
1656 /* mirrored write */
1657 static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
1658 {
1659 	sector_t sector;
1660 	struct drbd_epoch_entry *e;
1661 	struct p_data *p = (struct p_data *)h;
1662 	int header_size, data_size;
1663 	int rw = WRITE;
1664 	u32 dp_flags;
1665 
1666 	header_size = sizeof(*p) - sizeof(*h);
1667 	data_size   = h->length  - header_size;
1668 
1669 	ERR_IF(data_size == 0) return FALSE;
1670 
1671 	if (drbd_recv(mdev, h->payload, header_size) != header_size)
1672 		return FALSE;
1673 
1674 	if (!get_ldev(mdev)) {
1675 		if (__ratelimit(&drbd_ratelimit_state))
1676 			dev_err(DEV, "Can not write mirrored data block "
1677 			    "to local disk.\n");
1678 		spin_lock(&mdev->peer_seq_lock);
1679 		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1680 			mdev->peer_seq++;
1681 		spin_unlock(&mdev->peer_seq_lock);
1682 
1683 		drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1684 		atomic_inc(&mdev->current_epoch->epoch_size);
1685 		return drbd_drain_block(mdev, data_size);
1686 	}
1687 
1688 	/* get_ldev(mdev) successful.
1689 	 * Corresponding put_ldev done either below (on various errors),
1690 	 * or in drbd_endio_write_sec, if we successfully submit the data at
1691 	 * the end of this function. */
1692 
1693 	sector = be64_to_cpu(p->sector);
1694 	e = read_in_block(mdev, p->block_id, sector, data_size);
1695 	if (!e) {
1696 		put_ldev(mdev);
1697 		return FALSE;
1698 	}
1699 
1700 	e->private_bio->bi_end_io = drbd_endio_write_sec;
1701 	e->w.cb = e_end_block;
1702 
1703 	spin_lock(&mdev->epoch_lock);
1704 	e->epoch = mdev->current_epoch;
1705 	atomic_inc(&e->epoch->epoch_size);
1706 	atomic_inc(&e->epoch->active);
1707 
1708 	if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
1709 		struct drbd_epoch *epoch;
1710 		/* Issue a barrier if we start a new epoch, and the previous epoch
1711 		   was not an epoch containing a single request which already was
1712 		   a Barrier. */
1713 		epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
1714 		if (epoch == e->epoch) {
1715 			set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1716 			rw |= (1<<BIO_RW_BARRIER);
1717 			e->flags |= EE_IS_BARRIER;
1718 		} else {
1719 			if (atomic_read(&epoch->epoch_size) > 1 ||
1720 			    !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
1721 				set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1722 				set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1723 				rw |= (1<<BIO_RW_BARRIER);
1724 				e->flags |= EE_IS_BARRIER;
1725 			}
1726 		}
1727 	}
1728 	spin_unlock(&mdev->epoch_lock);
1729 
1730 	dp_flags = be32_to_cpu(p->dp_flags);
1731 	if (dp_flags & DP_HARDBARRIER) {
1732 		dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
1733 		/* rw |= (1<<BIO_RW_BARRIER); */
1734 	}
1735 	if (dp_flags & DP_RW_SYNC)
1736 		rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
1737 	if (dp_flags & DP_MAY_SET_IN_SYNC)
1738 		e->flags |= EE_MAY_SET_IN_SYNC;
1739 
1740 	/* I'm the receiver, I do hold a net_cnt reference. */
1741 	if (!mdev->net_conf->two_primaries) {
1742 		spin_lock_irq(&mdev->req_lock);
1743 	} else {
1744 		/* don't get the req_lock yet,
1745 		 * we may sleep in drbd_wait_peer_seq */
1746 		const int size = e->size;
1747 		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1748 		DEFINE_WAIT(wait);
1749 		struct drbd_request *i;
1750 		struct hlist_node *n;
1751 		struct hlist_head *slot;
1752 		int first;
1753 
1754 		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1755 		BUG_ON(mdev->ee_hash == NULL);
1756 		BUG_ON(mdev->tl_hash == NULL);
1757 
1758 		/* conflict detection and handling:
1759 		 * 1. wait on the sequence number,
1760 		 *    in case this data packet overtook ACK packets.
1761 		 * 2. check our hash tables for conflicting requests.
1762 		 *    we only need to walk the tl_hash, since an ee can not
1763 		 *    have a conflict with another ee: on the submitting
1764 		 *    node, the corresponding req had already been conflicting,
1765 		 *    and a conflicting req is never sent.
1766 		 *
1767 		 * Note: for two_primaries, we are protocol C,
1768 		 * so there cannot be any request that is DONE
1769 		 * but still on the transfer log.
1770 		 *
1771 		 * unconditionally add to the ee_hash.
1772 		 *
1773 		 * if no conflicting request is found:
1774 		 *    submit.
1775 		 *
1776 		 * if any conflicting request is found
1777 		 * that has not yet been acked,
1778 		 * AND I have the "discard concurrent writes" flag:
1779 		 *	 queue (via done_ee) the P_DISCARD_ACK; OUT.
1780 		 *
1781 		 * if any conflicting request is found:
1782 		 *	 block the receiver, waiting on misc_wait
1783 		 *	 until no more conflicting requests are there,
1784 		 *	 or we get interrupted (disconnect).
1785 		 *
1786 		 *	 we do not just write after local io completion of those
1787 		 *	 requests, but only after req is done completely, i.e.
1788 		 *	 we wait for the P_DISCARD_ACK to arrive!
1789 		 *
1790 		 *	 then proceed normally, i.e. submit.
1791 		 */
1792 		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1793 			goto out_interrupted;
1794 
1795 		spin_lock_irq(&mdev->req_lock);
1796 
1797 		hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
1798 
1799 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
1800 		slot = tl_hash_slot(mdev, sector);
1801 		first = 1;
1802 		for (;;) {
1803 			int have_unacked = 0;
1804 			int have_conflict = 0;
1805 			prepare_to_wait(&mdev->misc_wait, &wait,
1806 				TASK_INTERRUPTIBLE);
1807 			hlist_for_each_entry(i, n, slot, colision) {
1808 				if (OVERLAPS) {
1809 					/* only ALERT on first iteration,
1810 					 * we may be woken up early... */
1811 					if (first)
1812 						dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1813 						      "	new: %llus +%u; pending: %llus +%u\n",
1814 						      current->comm, current->pid,
1815 						      (unsigned long long)sector, size,
1816 						      (unsigned long long)i->sector, i->size);
1817 					if (i->rq_state & RQ_NET_PENDING)
1818 						++have_unacked;
1819 					++have_conflict;
1820 				}
1821 			}
1822 #undef OVERLAPS
1823 			if (!have_conflict)
1824 				break;
1825 
1826 			/* Discard Ack only for the _first_ iteration */
1827 			if (first && discard && have_unacked) {
1828 				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1829 				     (unsigned long long)sector);
1830 				inc_unacked(mdev);
1831 				e->w.cb = e_send_discard_ack;
1832 				list_add_tail(&e->w.list, &mdev->done_ee);
1833 
1834 				spin_unlock_irq(&mdev->req_lock);
1835 
1836 				/* we could probably send that P_DISCARD_ACK ourselves,
1837 				 * but I don't like the receiver using the msock */
1838 
1839 				put_ldev(mdev);
1840 				wake_asender(mdev);
1841 				finish_wait(&mdev->misc_wait, &wait);
1842 				return TRUE;
1843 			}
1844 
1845 			if (signal_pending(current)) {
1846 				hlist_del_init(&e->colision);
1847 
1848 				spin_unlock_irq(&mdev->req_lock);
1849 
1850 				finish_wait(&mdev->misc_wait, &wait);
1851 				goto out_interrupted;
1852 			}
1853 
1854 			spin_unlock_irq(&mdev->req_lock);
1855 			if (first) {
1856 				first = 0;
1857 				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1858 				     "sec=%llus\n", (unsigned long long)sector);
1859 			} else if (discard) {
1860 				/* we had none on the first iteration.
1861 				 * there must be none now. */
1862 				D_ASSERT(have_unacked == 0);
1863 			}
1864 			schedule();
1865 			spin_lock_irq(&mdev->req_lock);
1866 		}
1867 		finish_wait(&mdev->misc_wait, &wait);
1868 	}
1869 
1870 	list_add(&e->w.list, &mdev->active_ee);
1871 	spin_unlock_irq(&mdev->req_lock);
1872 
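	/* Acknowledge according to the wire protocol: protocol C defers its
	 * write ack until local completion (see e_end_block), protocol B acks
	 * receipt right away, protocol A sends no ack at all. */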
1873 	switch (mdev->net_conf->wire_protocol) {
1874 	case DRBD_PROT_C:
1875 		inc_unacked(mdev);
1876 		/* corresponding dec_unacked() in e_end_block()
1877 		 * respective _drbd_clear_done_ee */
1878 		break;
1879 	case DRBD_PROT_B:
1880 		/* I really don't like it that the receiver thread
1881 		 * sends on the msock, but anyways */
1882 		drbd_send_ack(mdev, P_RECV_ACK, e);
1883 		break;
1884 	case DRBD_PROT_A:
1885 		/* nothing to do */
1886 		break;
1887 	}
1888 
1889 	if (mdev->state.pdsk == D_DISKLESS) {
1890 		/* peer has no disk: we have the only disk of the cluster,
		 * so mark the block out of sync and cover it by the activity log */
1891 		drbd_set_out_of_sync(mdev, e->sector, e->size);
1892 		e->flags |= EE_CALL_AL_COMPLETE_IO;
1893 		drbd_al_begin_io(mdev, e->sector);
1894 	}
1895 
1896 	e->private_bio->bi_rw = rw;
1897 	drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, e->private_bio);
1898 	/* accounting done in endio */
1899 
1900 	maybe_kick_lo(mdev);
1901 	return TRUE;
1902 
1903 out_interrupted:
1904 	/* yes, the epoch_size now is imbalanced.
1905 	 * but we drop the connection anyways, so we don't have a chance to
1906 	 * receive a barrier... atomic_inc(&mdev->epoch_size); */
1907 	put_ldev(mdev);
1908 	drbd_free_ee(mdev, e);
1909 	return FALSE;
1910 }
1911 
1912 static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
1913 {
1914 	sector_t sector;
1915 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1916 	struct drbd_epoch_entry *e;
1917 	struct digest_info *di = NULL;
1918 	int size, digest_size;
1919 	unsigned int fault_type;
1920 	struct p_block_req *p =
1921 		(struct p_block_req *)h;
1922 	const int brps = sizeof(*p)-sizeof(*h);
1923 
1924 	if (drbd_recv(mdev, h->payload, brps) != brps)
1925 		return FALSE;
1926 
1927 	sector = be64_to_cpu(p->sector);
1928 	size   = be32_to_cpu(p->blksize);
1929 
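	/* Sanity check the request: the size must be a positive multiple of
	 * 512 bytes, must not exceed our maximum segment size, and must not
	 * reach beyond the end of the device. */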
1930 	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
1931 		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1932 				(unsigned long long)sector, size);
1933 		return FALSE;
1934 	}
1935 	if (sector + (size>>9) > capacity) {
1936 		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1937 				(unsigned long long)sector, size);
1938 		return FALSE;
1939 	}
1940 
1941 	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
1942 		if (__ratelimit(&drbd_ratelimit_state))
1943 			dev_err(DEV, "Can not satisfy peer's read request, "
1944 			    "no local data.\n");
1945 		drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY :
1946 				 P_NEG_RS_DREPLY , p);
1947 		return TRUE;
1948 	}
1949 
1950 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1951 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
1952 	 * which in turn might block on the other node at this very place.  */
1953 	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
1954 	if (!e) {
1955 		put_ldev(mdev);
1956 		return FALSE;
1957 	}
1958 
1959 	e->private_bio->bi_rw = READ;
1960 	e->private_bio->bi_end_io = drbd_endio_read_sec;
1961 
1962 	switch (h->command) {
1963 	case P_DATA_REQUEST:
1964 		e->w.cb = w_e_end_data_req;
1965 		fault_type = DRBD_FAULT_DT_RD;
1966 		break;
1967 	case P_RS_DATA_REQUEST:
1968 		e->w.cb = w_e_end_rsdata_req;
1969 		fault_type = DRBD_FAULT_RS_RD;
1970 		/* Eventually this should become asynchronous. Currently it
1971 		 * blocks the whole receiver just to delay the reading of a
1972 		 * resync data block.
1973 		 * the drbd_work_queue mechanism is made for this...
1974 		 */
1975 		if (!drbd_rs_begin_io(mdev, sector)) {
1976 			/* we have been interrupted,
1977 			 * probably connection lost! */
1978 			D_ASSERT(signal_pending(current));
1979 			goto out_free_e;
1980 		}
1981 		break;
1982 
1983 	case P_OV_REPLY:
1984 	case P_CSUM_RS_REQUEST:
1985 		fault_type = DRBD_FAULT_RS_RD;
1986 		digest_size = h->length - brps;
1987 		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
1988 		if (!di)
1989 			goto out_free_e;
1990 
1991 		di->digest_size = digest_size;
1992 		di->digest = (((char *)di)+sizeof(struct digest_info));
1993 
1994 		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
1995 			goto out_free_e;
1996 
1997 		e->block_id = (u64)(unsigned long)di;
1998 		if (h->command == P_CSUM_RS_REQUEST) {
1999 			D_ASSERT(mdev->agreed_pro_version >= 89);
2000 			e->w.cb = w_e_end_csum_rs_req;
2001 		} else if (h->command == P_OV_REPLY) {
2002 			e->w.cb = w_e_end_ov_reply;
2003 			dec_rs_pending(mdev);
2004 			break;
2005 		}
2006 
2007 		if (!drbd_rs_begin_io(mdev, sector)) {
2008 			/* we have been interrupted, probably connection lost! */
2009 			D_ASSERT(signal_pending(current));
2010 			goto out_free_e;
2011 		}
2012 		break;
2013 
2014 	case P_OV_REQUEST:
2015 		if (mdev->state.conn >= C_CONNECTED &&
2016 		    mdev->state.conn != C_VERIFY_T)
2017 			dev_warn(DEV, "ASSERT FAILED: got P_OV_REQUEST while being %s\n",
2018 				drbd_conn_str(mdev->state.conn));
2019 		if (mdev->ov_start_sector == ~(sector_t)0 &&
2020 		    mdev->agreed_pro_version >= 90) {
2021 			mdev->ov_start_sector = sector;
2022 			mdev->ov_position = sector;
2023 			mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
2024 			dev_info(DEV, "Online Verify start sector: %llu\n",
2025 					(unsigned long long)sector);
2026 		}
2027 		e->w.cb = w_e_end_ov_req;
2028 		fault_type = DRBD_FAULT_RS_RD;
2029 		/* Eventually this should become asynchronous. Currently it
2030 		 * blocks the whole receiver just to delay the reading of a
2031 		 * resync data block.
2032 		 * the drbd_work_queue mechanism is made for this...
2033 		 */
2034 		if (!drbd_rs_begin_io(mdev, sector)) {
2035 			/* we have been interrupted,
2036 			 * probably connection lost! */
2037 			D_ASSERT(signal_pending(current));
2038 			goto out_free_e;
2039 		}
2040 		break;
2041 
2042 
2043 	default:
2044 		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2045 		    cmdname(h->command));
2046 		fault_type = DRBD_FAULT_MAX;
2047 	}
2048 
2049 	spin_lock_irq(&mdev->req_lock);
2050 	list_add(&e->w.list, &mdev->read_ee);
2051 	spin_unlock_irq(&mdev->req_lock);
2052 
2053 	inc_unacked(mdev);
2054 
2055 	drbd_generic_make_request(mdev, fault_type, e->private_bio);
2056 	maybe_kick_lo(mdev);
2057 
2058 	return TRUE;
2059 
2060 out_free_e:
2061 	kfree(di);
2062 	put_ldev(mdev);
2063 	drbd_free_ee(mdev, e);
2064 	return FALSE;
2065 }
2066 
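/* After-split-brain auto recovery for the zero-primaries case.
 * rv < 0: this node becomes sync target (we discard our modifications),
 * rv > 0: the peer becomes sync target,
 * rv == -100: the configured policy could not resolve the split brain. */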
2067 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2068 {
2069 	int self, peer, rv = -100;
2070 	unsigned long ch_self, ch_peer;
2071 
2072 	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2073 	peer = mdev->p_uuid[UI_BITMAP] & 1;
2074 
2075 	ch_peer = mdev->p_uuid[UI_SIZE];
2076 	ch_self = mdev->comm_bm_set;
2077 
2078 	switch (mdev->net_conf->after_sb_0p) {
2079 	case ASB_CONSENSUS:
2080 	case ASB_DISCARD_SECONDARY:
2081 	case ASB_CALL_HELPER:
2082 		dev_err(DEV, "Configuration error.\n");
2083 		break;
2084 	case ASB_DISCONNECT:
2085 		break;
2086 	case ASB_DISCARD_YOUNGER_PRI:
2087 		if (self == 0 && peer == 1) {
2088 			rv = -1;
2089 			break;
2090 		}
2091 		if (self == 1 && peer == 0) {
2092 			rv =  1;
2093 			break;
2094 		}
2095 		/* Else fall through to one of the other strategies... */
2096 	case ASB_DISCARD_OLDER_PRI:
2097 		if (self == 0 && peer == 1) {
2098 			rv = 1;
2099 			break;
2100 		}
2101 		if (self == 1 && peer == 0) {
2102 			rv = -1;
2103 			break;
2104 		}
2105 		/* Else fall through to one of the other strategies... */
2106 		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2107 		     "Using discard-least-changes instead\n");
2108 	case ASB_DISCARD_ZERO_CHG:
2109 		if (ch_peer == 0 && ch_self == 0) {
2110 			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2111 				? -1 : 1;
2112 			break;
2113 		} else {
2114 			if (ch_peer == 0) { rv =  1; break; }
2115 			if (ch_self == 0) { rv = -1; break; }
2116 		}
2117 		if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2118 			break;
2119 	case ASB_DISCARD_LEAST_CHG:
2120 		if	(ch_self < ch_peer)
2121 			rv = -1;
2122 		else if (ch_self > ch_peer)
2123 			rv =  1;
2124 		else /* ( ch_self == ch_peer ) */
2125 		     /* Well, then use something else. */
2126 			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2127 				? -1 : 1;
2128 		break;
2129 	case ASB_DISCARD_LOCAL:
2130 		rv = -1;
2131 		break;
2132 	case ASB_DISCARD_REMOTE:
2133 		rv =  1;
2134 	}
2135 
2136 	return rv;
2137 }
2138 
2139 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2140 {
2141 	int self, peer, hg, rv = -100;
2142 
2143 	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2144 	peer = mdev->p_uuid[UI_BITMAP] & 1;
2145 
2146 	switch (mdev->net_conf->after_sb_1p) {
2147 	case ASB_DISCARD_YOUNGER_PRI:
2148 	case ASB_DISCARD_OLDER_PRI:
2149 	case ASB_DISCARD_LEAST_CHG:
2150 	case ASB_DISCARD_LOCAL:
2151 	case ASB_DISCARD_REMOTE:
2152 		dev_err(DEV, "Configuration error.\n");
2153 		break;
2154 	case ASB_DISCONNECT:
2155 		break;
2156 	case ASB_CONSENSUS:
2157 		hg = drbd_asb_recover_0p(mdev);
2158 		if (hg == -1 && mdev->state.role == R_SECONDARY)
2159 			rv = hg;
2160 		if (hg == 1  && mdev->state.role == R_PRIMARY)
2161 			rv = hg;
2162 		break;
2163 	case ASB_VIOLENTLY:
2164 		rv = drbd_asb_recover_0p(mdev);
2165 		break;
2166 	case ASB_DISCARD_SECONDARY:
2167 		return mdev->state.role == R_PRIMARY ? 1 : -1;
2168 	case ASB_CALL_HELPER:
2169 		hg = drbd_asb_recover_0p(mdev);
2170 		if (hg == -1 && mdev->state.role == R_PRIMARY) {
2171 			self = drbd_set_role(mdev, R_SECONDARY, 0);
2172 			 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2173 			  * we might be here in C_WF_REPORT_PARAMS which is transient.
2174 			  * we do not need to wait for the after state change work either. */
2175 			self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2176 			if (self != SS_SUCCESS) {
2177 				drbd_khelper(mdev, "pri-lost-after-sb");
2178 			} else {
2179 				dev_warn(DEV, "Successfully gave up primary role.\n");
2180 				rv = hg;
2181 			}
2182 		} else
2183 			rv = hg;
2184 	}
2185 
2186 	return rv;
2187 }
2188 
2189 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2190 {
2191 	int self, peer, hg, rv = -100;
2192 
2193 	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2194 	peer = mdev->p_uuid[UI_BITMAP] & 1;
2195 
2196 	switch (mdev->net_conf->after_sb_2p) {
2197 	case ASB_DISCARD_YOUNGER_PRI:
2198 	case ASB_DISCARD_OLDER_PRI:
2199 	case ASB_DISCARD_LEAST_CHG:
2200 	case ASB_DISCARD_LOCAL:
2201 	case ASB_DISCARD_REMOTE:
2202 	case ASB_CONSENSUS:
2203 	case ASB_DISCARD_SECONDARY:
2204 		dev_err(DEV, "Configuration error.\n");
2205 		break;
2206 	case ASB_VIOLENTLY:
2207 		rv = drbd_asb_recover_0p(mdev);
2208 		break;
2209 	case ASB_DISCONNECT:
2210 		break;
2211 	case ASB_CALL_HELPER:
2212 		hg = drbd_asb_recover_0p(mdev);
2213 		if (hg == -1) {
2214 			 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2215 			  * we might be here in C_WF_REPORT_PARAMS which is transient.
2216 			  * we do not need to wait for the after state change work either. */
2217 			self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2218 			if (self != SS_SUCCESS) {
2219 				drbd_khelper(mdev, "pri-lost-after-sb");
2220 			} else {
2221 				dev_warn(DEV, "Successfully gave up primary role.\n");
2222 				rv = hg;
2223 			}
2224 		} else
2225 			rv = hg;
2226 	}
2227 
2228 	return rv;
2229 }
2230 
2231 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2232 			   u64 bits, u64 flags)
2233 {
2234 	if (!uuid) {
2235 		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2236 		return;
2237 	}
2238 	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2239 	     text,
2240 	     (unsigned long long)uuid[UI_CURRENT],
2241 	     (unsigned long long)uuid[UI_BITMAP],
2242 	     (unsigned long long)uuid[UI_HISTORY_START],
2243 	     (unsigned long long)uuid[UI_HISTORY_END],
2244 	     (unsigned long long)bits,
2245 	     (unsigned long long)flags);
2246 }
2247 
2248 /*
2249   100	after split brain try auto recover
2250     2	C_SYNC_SOURCE set BitMap
2251     1	C_SYNC_SOURCE use BitMap
2252     0	no Sync
2253    -1	C_SYNC_TARGET use BitMap
2254    -2	C_SYNC_TARGET set BitMap
2255  -100	after split brain, disconnect
2256 -1000	unrelated data
-1001	requires at least protocol 91 on both sides to resolve
2257  */
2258 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2259 {
2260 	u64 self, peer;
2261 	int i, j;
2262 
2263 	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2264 	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2265 
2266 	*rule_nr = 10;
2267 	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2268 		return 0;
2269 
2270 	*rule_nr = 20;
2271 	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2272 	     peer != UUID_JUST_CREATED)
2273 		return -2;
2274 
2275 	*rule_nr = 30;
2276 	if (self != UUID_JUST_CREATED &&
2277 	    (peer == UUID_JUST_CREATED || peer == (u64)0))
2278 		return 2;
2279 
2280 	if (self == peer) {
2281 		int rct, dc; /* roles at crash time */
2282 
2283 		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2284 
2285 			if (mdev->agreed_pro_version < 91)
2286 				return -1001;
2287 
2288 			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2289 			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2290 				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2291 				drbd_uuid_set_bm(mdev, 0UL);
2292 
2293 				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2294 					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2295 				*rule_nr = 34;
2296 			} else {
2297 				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2298 				*rule_nr = 36;
2299 			}
2300 
2301 			return 1;
2302 		}
2303 
2304 		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2305 
2306 			if (mdev->agreed_pro_version < 91)
2307 				return -1001;
2308 
2309 			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2310 			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2311 				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2312 
2313 				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2314 				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2315 				mdev->p_uuid[UI_BITMAP] = 0UL;
2316 
2317 				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2318 				*rule_nr = 35;
2319 			} else {
2320 				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2321 				*rule_nr = 37;
2322 			}
2323 
2324 			return -1;
2325 		}
2326 
2327 		/* Common power [off|failure] */
2328 		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2329 			(mdev->p_uuid[UI_FLAGS] & 2);
2330 		/* lowest bit is set when we were primary,
2331 		 * next bit (weight 2) is set when peer was primary */
2332 		*rule_nr = 40;
2333 
2334 		switch (rct) {
2335 		case 0: /* !self_pri && !peer_pri */ return 0;
2336 		case 1: /*  self_pri && !peer_pri */ return 1;
2337 		case 2: /* !self_pri &&  peer_pri */ return -1;
2338 		case 3: /*  self_pri &&  peer_pri */
2339 			dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2340 			return dc ? -1 : 1;
2341 		}
2342 	}
2343 
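	/* The current UUIDs differ. Check whether our current UUID shows up in
	 * the peer's bitmap/history UUIDs (the peer has newer data, we become
	 * sync target) or, further below, the other way around. */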
2344 	*rule_nr = 50;
2345 	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2346 	if (self == peer)
2347 		return -1;
2348 
2349 	*rule_nr = 51;
2350 	peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2351 	if (self == peer) {
2352 		self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2353 		peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
2354 		if (self == peer) {
2355 			/* The last P_SYNC_UUID did not get through. Undo the peer's
2356 			   UUID modifications from its last start of a resync as sync source. */
2357 
2358 			if (mdev->agreed_pro_version < 91)
2359 				return -1001;
2360 
2361 			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2362 			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2363 			return -1;
2364 		}
2365 	}
2366 
2367 	*rule_nr = 60;
2368 	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2369 	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2370 		peer = mdev->p_uuid[i] & ~((u64)1);
2371 		if (self == peer)
2372 			return -2;
2373 	}
2374 
2375 	*rule_nr = 70;
2376 	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2377 	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2378 	if (self == peer)
2379 		return 1;
2380 
2381 	*rule_nr = 71;
2382 	self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2383 	if (self == peer) {
2384 		self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
2385 		peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2386 		if (self == peer) {
2387 			/* The last P_SYNC_UUID did not get through. Undo our own
2388 			   UUID modifications from our last start of a resync as sync source. */
2389 
2390 			if (mdev->agreed_pro_version < 91)
2391 				return -1001;
2392 
2393 			_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2394 			_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2395 
2396 			dev_info(DEV, "Undid last start of resync:\n");
2397 
2398 			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2399 				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2400 
2401 			return 1;
2402 		}
2403 	}
2404 
2405 
2406 	*rule_nr = 80;
2407 	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2408 	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2409 		self = mdev->ldev->md.uuid[i] & ~((u64)1);
2410 		if (self == peer)
2411 			return 2;
2412 	}
2413 
2414 	*rule_nr = 90;
2415 	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2416 	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2417 	if (self == peer && self != ((u64)0))
2418 		return 100;
2419 
2420 	*rule_nr = 100;
2421 	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2422 		self = mdev->ldev->md.uuid[i] & ~((u64)1);
2423 		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2424 			peer = mdev->p_uuid[j] & ~((u64)1);
2425 			if (self == peer)
2426 				return -100;
2427 		}
2428 	}
2429 
2430 	return -1000;
2431 }
2432 
2433 /* drbd_sync_handshake() returns the new conn state on success, or
2434    CONN_MASK (-1) on failure.
2435  */
2436 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2437 					   enum drbd_disk_state peer_disk) __must_hold(local)
2438 {
2439 	int hg, rule_nr;
2440 	enum drbd_conns rv = C_MASK;
2441 	enum drbd_disk_state mydisk;
2442 
2443 	mydisk = mdev->state.disk;
2444 	if (mydisk == D_NEGOTIATING)
2445 		mydisk = mdev->new_state_tmp.disk;
2446 
2447 	dev_info(DEV, "drbd_sync_handshake:\n");
2448 	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2449 	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2450 		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2451 
2452 	hg = drbd_uuid_compare(mdev, &rule_nr);
2453 
2454 	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2455 
2456 	if (hg == -1000) {
2457 		dev_alert(DEV, "Unrelated data, aborting!\n");
2458 		return C_MASK;
2459 	}
2460 	if (hg == -1001) {
2461 		dev_alert(DEV, "To resolve this both sides have to support at least protocol\n");
2462 		return C_MASK;
2463 	}
2464 
2465 	if    ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2466 	    (peer_disk == D_INCONSISTENT && mydisk    > D_INCONSISTENT)) {
2467 		int f = (hg == -100) || abs(hg) == 2;
2468 		hg = mydisk > D_INCONSISTENT ? 1 : -1;
2469 		if (f)
2470 			hg = hg*2;
2471 		dev_info(DEV, "Becoming sync %s due to disk states.\n",
2472 		     hg > 0 ? "source" : "target");
2473 	}
2474 
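	/* hg == 100: split brain for which the UUIDs suggest auto recovery;
	 * hg == -100: split brain we only try to auto-resolve when the
	 * always_asbp ("always apply after-sb policies") option is set. */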
2475 	if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2476 		int pcount = (mdev->state.role == R_PRIMARY)
2477 			   + (peer_role == R_PRIMARY);
2478 		int forced = (hg == -100);
2479 
2480 		switch (pcount) {
2481 		case 0:
2482 			hg = drbd_asb_recover_0p(mdev);
2483 			break;
2484 		case 1:
2485 			hg = drbd_asb_recover_1p(mdev);
2486 			break;
2487 		case 2:
2488 			hg = drbd_asb_recover_2p(mdev);
2489 			break;
2490 		}
2491 		if (abs(hg) < 100) {
2492 			dev_warn(DEV, "Split-Brain detected, %d primaries, "
2493 			     "automatically solved. Sync from %s node\n",
2494 			     pcount, (hg < 0) ? "peer" : "this");
2495 			if (forced) {
2496 				dev_warn(DEV, "Doing a full sync, since"
2497 				     " UUIDs where ambiguous.\n");
2498 				hg = hg*2;
2499 			}
2500 		}
2501 	}
2502 
2503 	if (hg == -100) {
2504 		if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2505 			hg = -1;
2506 		if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2507 			hg = 1;
2508 
2509 		if (abs(hg) < 100)
2510 			dev_warn(DEV, "Split-Brain detected, manually solved. "
2511 			     "Sync from %s node\n",
2512 			     (hg < 0) ? "peer" : "this");
2513 	}
2514 
2515 	if (hg == -100) {
2516 		/* FIXME this log message is not correct if we end up here
2517 		 * after an attempted attach on a diskless node.
2518 		 * We just refuse to attach -- well, we drop the "connection"
2519 		 * to that disk, in a way... */
2520 		dev_alert(DEV, "Split-Brain detected, dropping connection!\n");
2521 		drbd_khelper(mdev, "split-brain");
2522 		return C_MASK;
2523 	}
2524 
2525 	if (hg > 0 && mydisk <= D_INCONSISTENT) {
2526 		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2527 		return C_MASK;
2528 	}
2529 
2530 	if (hg < 0 && /* by intention we do not use mydisk here. */
2531 	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2532 		switch (mdev->net_conf->rr_conflict) {
2533 		case ASB_CALL_HELPER:
2534 			drbd_khelper(mdev, "pri-lost");
2535 			/* fall through */
2536 		case ASB_DISCONNECT:
2537 			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2538 			return C_MASK;
2539 		case ASB_VIOLENTLY:
2540 			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2541 			     "assumption\n");
2542 		}
2543 	}
2544 
2545 	if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2546 		if (hg == 0)
2547 			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2548 		else
2549 			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2550 				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2551 				 abs(hg) >= 2 ? "full" : "bit-map based");
2552 		return C_MASK;
2553 	}
2554 
2555 	if (abs(hg) >= 2) {
2556 		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2557 		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2558 			return C_MASK;
2559 	}
2560 
2561 	if (hg > 0) { /* become sync source. */
2562 		rv = C_WF_BITMAP_S;
2563 	} else if (hg < 0) { /* become sync target */
2564 		rv = C_WF_BITMAP_T;
2565 	} else {
2566 		rv = C_CONNECTED;
2567 		if (drbd_bm_total_weight(mdev)) {
2568 			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2569 			     drbd_bm_total_weight(mdev));
2570 		}
2571 	}
2572 
2573 	return rv;
2574 }
2575 
2576 /* returns 1 if invalid */
2577 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2578 {
2579 	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2580 	if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2581 	    (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2582 		return 0;
2583 
2584 	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2585 	if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2586 	    self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2587 		return 1;
2588 
2589 	/* everything else is valid if they are equal on both sides. */
2590 	if (peer == self)
2591 		return 0;
2592 
2593 	/* everything else is invalid. */
2594 	return 1;
2595 }
2596 
2597 static int receive_protocol(struct drbd_conf *mdev, struct p_header *h)
2598 {
2599 	struct p_protocol *p = (struct p_protocol *)h;
2600 	int header_size, data_size;
2601 	int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2602 	int p_want_lose, p_two_primaries, cf;
2603 	char p_integrity_alg[SHARED_SECRET_MAX] = "";
2604 
2605 	header_size = sizeof(*p) - sizeof(*h);
2606 	data_size   = h->length  - header_size;
2607 
2608 	if (drbd_recv(mdev, h->payload, header_size) != header_size)
2609 		return FALSE;
2610 
2611 	p_proto		= be32_to_cpu(p->protocol);
2612 	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
2613 	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
2614 	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
2615 	p_two_primaries = be32_to_cpu(p->two_primaries);
2616 	cf		= be32_to_cpu(p->conn_flags);
2617 	p_want_lose = cf & CF_WANT_LOSE;
2618 
2619 	clear_bit(CONN_DRY_RUN, &mdev->flags);
2620 
2621 	if (cf & CF_DRY_RUN)
2622 		set_bit(CONN_DRY_RUN, &mdev->flags);
2623 
2624 	if (p_proto != mdev->net_conf->wire_protocol) {
2625 		dev_err(DEV, "incompatible communication protocols\n");
2626 		goto disconnect;
2627 	}
2628 
2629 	if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2630 		dev_err(DEV, "incompatible after-sb-0pri settings\n");
2631 		goto disconnect;
2632 	}
2633 
2634 	if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2635 		dev_err(DEV, "incompatible after-sb-1pri settings\n");
2636 		goto disconnect;
2637 	}
2638 
2639 	if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2640 		dev_err(DEV, "incompatible after-sb-2pri settings\n");
2641 		goto disconnect;
2642 	}
2643 
2644 	if (p_want_lose && mdev->net_conf->want_lose) {
2645 		dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2646 		goto disconnect;
2647 	}
2648 
2649 	if (p_two_primaries != mdev->net_conf->two_primaries) {
2650 		dev_err(DEV, "incompatible setting of the two-primaries options\n");
2651 		goto disconnect;
2652 	}
2653 
2654 	if (mdev->agreed_pro_version >= 87) {
2655 		unsigned char *my_alg = mdev->net_conf->integrity_alg;
2656 
2657 		if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2658 			return FALSE;
2659 
2660 		p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2661 		if (strcmp(p_integrity_alg, my_alg)) {
2662 			dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2663 			goto disconnect;
2664 		}
2665 		dev_info(DEV, "data-integrity-alg: %s\n",
2666 		     my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2667 	}
2668 
2669 	return TRUE;
2670 
2671 disconnect:
2672 	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2673 	return FALSE;
2674 }
2675 
2676 /* helper function
2677  * input: alg name, feature name
2678  * return: NULL (alg name was "")
2679  *         ERR_PTR(error) if something goes wrong
2680  *         or the crypto hash ptr, if it worked out ok. */
2681 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2682 		const char *alg, const char *name)
2683 {
2684 	struct crypto_hash *tfm;
2685 
2686 	if (!alg[0])
2687 		return NULL;
2688 
2689 	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2690 	if (IS_ERR(tfm)) {
2691 		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2692 			alg, name, PTR_ERR(tfm));
2693 		return tfm;
2694 	}
2695 	if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2696 		crypto_free_hash(tfm);
2697 		dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2698 		return ERR_PTR(-EINVAL);
2699 	}
2700 	return tfm;
2701 }
2702 
2703 static int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h)
2704 {
2705 	int ok = TRUE;
2706 	struct p_rs_param_89 *p = (struct p_rs_param_89 *)h;
2707 	unsigned int header_size, data_size, exp_max_sz;
2708 	struct crypto_hash *verify_tfm = NULL;
2709 	struct crypto_hash *csums_tfm = NULL;
2710 	const int apv = mdev->agreed_pro_version;
2711 
2712 	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
2713 		    : apv == 88 ? sizeof(struct p_rs_param)
2714 					+ SHARED_SECRET_MAX
2715 		    : /* 89 */    sizeof(struct p_rs_param_89);
2716 
2717 	if (h->length > exp_max_sz) {
2718 		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2719 		    h->length, exp_max_sz);
2720 		return FALSE;
2721 	}
2722 
2723 	if (apv <= 88) {
2724 		header_size = sizeof(struct p_rs_param) - sizeof(*h);
2725 		data_size   = h->length  - header_size;
2726 	} else /* apv >= 89 */ {
2727 		header_size = sizeof(struct p_rs_param_89) - sizeof(*h);
2728 		data_size   = h->length  - header_size;
2729 		D_ASSERT(data_size == 0);
2730 	}
2731 
2732 	/* initialize verify_alg and csums_alg */
2733 	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2734 
2735 	if (drbd_recv(mdev, h->payload, header_size) != header_size)
2736 		return FALSE;
2737 
2738 	mdev->sync_conf.rate	  = be32_to_cpu(p->rate);
2739 
2740 	if (apv >= 88) {
2741 		if (apv == 88) {
2742 			if (data_size > SHARED_SECRET_MAX) {
2743 				dev_err(DEV, "verify-alg too long, "
2744 				    "peer wants %u, accepting only %u byte\n",
2745 						data_size, SHARED_SECRET_MAX);
2746 				return FALSE;
2747 			}
2748 
2749 			if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2750 				return FALSE;
2751 
2752 			/* we expect NUL terminated string */
2753 			/* but just in case someone tries to be evil */
2754 			D_ASSERT(p->verify_alg[data_size-1] == 0);
2755 			p->verify_alg[data_size-1] = 0;
2756 
2757 		} else /* apv >= 89 */ {
2758 			/* we still expect NUL terminated strings */
2759 			/* but just in case someone tries to be evil */
2760 			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2761 			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2762 			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2763 			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2764 		}
2765 
2766 		if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2767 			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2768 				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2769 				    mdev->sync_conf.verify_alg, p->verify_alg);
2770 				goto disconnect;
2771 			}
2772 			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2773 					p->verify_alg, "verify-alg");
2774 			if (IS_ERR(verify_tfm)) {
2775 				verify_tfm = NULL;
2776 				goto disconnect;
2777 			}
2778 		}
2779 
2780 		if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2781 			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2782 				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2783 				    mdev->sync_conf.csums_alg, p->csums_alg);
2784 				goto disconnect;
2785 			}
2786 			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2787 					p->csums_alg, "csums-alg");
2788 			if (IS_ERR(csums_tfm)) {
2789 				csums_tfm = NULL;
2790 				goto disconnect;
2791 			}
2792 		}
2793 
2794 
2795 		spin_lock(&mdev->peer_seq_lock);
2796 		/* lock against drbd_nl_syncer_conf() */
2797 		if (verify_tfm) {
2798 			strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2799 			mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2800 			crypto_free_hash(mdev->verify_tfm);
2801 			mdev->verify_tfm = verify_tfm;
2802 			dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2803 		}
2804 		if (csums_tfm) {
2805 			strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2806 			mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2807 			crypto_free_hash(mdev->csums_tfm);
2808 			mdev->csums_tfm = csums_tfm;
2809 			dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2810 		}
2811 		spin_unlock(&mdev->peer_seq_lock);
2812 	}
2813 
2814 	return ok;
2815 disconnect:
2816 	/* just for completeness: actually not needed,
2817 	 * as this is not reached if csums_tfm was ok. */
2818 	crypto_free_hash(csums_tfm);
2819 	/* but free the verify_tfm again, if csums_tfm did not work out */
2820 	crypto_free_hash(verify_tfm);
2821 	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2822 	return FALSE;
2823 }
2824 
2825 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2826 {
2827 	/* sorry, we currently have no working implementation
2828 	 * of distributed TCQ */
2829 }
2830 
2831 /* warn if the arguments differ by more than 12.5% */
2832 static void warn_if_differ_considerably(struct drbd_conf *mdev,
2833 	const char *s, sector_t a, sector_t b)
2834 {
2835 	sector_t d;
2836 	if (a == 0 || b == 0)
2837 		return;
2838 	d = (a > b) ? (a - b) : (b - a);
2839 	if (d > (a>>3) || d > (b>>3))
2840 		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2841 		     (unsigned long long)a, (unsigned long long)b);
2842 }
2843 
2844 static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
2845 {
2846 	struct p_sizes *p = (struct p_sizes *)h;
2847 	enum determine_dev_size dd = unchanged;
2848 	unsigned int max_seg_s;
2849 	sector_t p_size, p_usize, my_usize;
2850 	int ldsc = 0; /* local disk size changed */
2851 	enum drbd_conns nconn;
2852 
2853 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
2854 	if (drbd_recv(mdev, h->payload, h->length) != h->length)
2855 		return FALSE;
2856 
2857 	p_size = be64_to_cpu(p->d_size);
2858 	p_usize = be64_to_cpu(p->u_size);
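	/* d_size is the peer's backing device capacity, u_size the size the
	 * user configured on the peer; both are in units of 512 byte sectors. */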
2859 
2860 	if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2861 		dev_err(DEV, "some backing storage is needed\n");
2862 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2863 		return FALSE;
2864 	}
2865 
2866 	/* just store the peer's disk size for now.
2867 	 * we still need to figure out whether we accept that. */
2868 	mdev->p_size = p_size;
2869 
2870 #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))
2871 	if (get_ldev(mdev)) {
2872 		warn_if_differ_considerably(mdev, "lower level device sizes",
2873 			   p_size, drbd_get_max_capacity(mdev->ldev));
2874 		warn_if_differ_considerably(mdev, "user requested size",
2875 					    p_usize, mdev->ldev->dc.disk_size);
2876 
2877 		/* if this is the first connect, or an otherwise expected
2878 		 * param exchange, choose the minimum */
2879 		if (mdev->state.conn == C_WF_REPORT_PARAMS)
2880 			p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2881 					     p_usize);
2882 
2883 		my_usize = mdev->ldev->dc.disk_size;
2884 
2885 		if (mdev->ldev->dc.disk_size != p_usize) {
2886 			mdev->ldev->dc.disk_size = p_usize;
2887 			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2888 			     (unsigned long)mdev->ldev->dc.disk_size);
2889 		}
2890 
2891 		/* Never shrink a device with usable data during connect.
2892 		   But allow online shrinking if we are connected. */
2893 		if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
2894 		   drbd_get_capacity(mdev->this_bdev) &&
2895 		   mdev->state.disk >= D_OUTDATED &&
2896 		   mdev->state.conn < C_CONNECTED) {
2897 			dev_err(DEV, "The peer's disk size is too small!\n");
2898 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2899 			mdev->ldev->dc.disk_size = my_usize;
2900 			put_ldev(mdev);
2901 			return FALSE;
2902 		}
2903 		put_ldev(mdev);
2904 	}
2905 #undef min_not_zero
2906 
2907 	if (get_ldev(mdev)) {
2908 		dd = drbd_determin_dev_size(mdev, 0);
2909 		put_ldev(mdev);
2910 		if (dd == dev_size_error)
2911 			return FALSE;
2912 		drbd_md_sync(mdev);
2913 	} else {
2914 		/* I am diskless, need to accept the peer's size. */
2915 		drbd_set_my_capacity(mdev, p_size);
2916 	}
2917 
2918 	if (mdev->p_uuid && mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
2919 		nconn = drbd_sync_handshake(mdev,
2920 				mdev->state.peer, mdev->state.pdsk);
2921 		put_ldev(mdev);
2922 
2923 		if (nconn == C_MASK) {
2924 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2925 			return FALSE;
2926 		}
2927 
2928 		if (drbd_request_state(mdev, NS(conn, nconn)) < SS_SUCCESS) {
2929 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2930 			return FALSE;
2931 		}
2932 	}
2933 
2934 	if (get_ldev(mdev)) {
2935 		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
2936 			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2937 			ldsc = 1;
2938 		}
2939 
2940 		max_seg_s = be32_to_cpu(p->max_segment_size);
2941 		if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
2942 			drbd_setup_queue_param(mdev, max_seg_s);
2943 
2944 		drbd_setup_order_type(mdev, be32_to_cpu(p->queue_order_type));
2945 		put_ldev(mdev);
2946 	}
2947 
2948 	if (mdev->state.conn > C_WF_REPORT_PARAMS) {
2949 		if (be64_to_cpu(p->c_size) !=
2950 		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
2951 			/* we have different sizes, probably peer
2952 			 * needs to know my new size... */
2953 			drbd_send_sizes(mdev, 0);
2954 		}
2955 		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
2956 		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
2957 			if (mdev->state.pdsk >= D_INCONSISTENT &&
2958 			    mdev->state.disk >= D_INCONSISTENT)
2959 				resync_after_online_grow(mdev);
2960 			else
2961 				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
2962 		}
2963 	}
2964 
2965 	return TRUE;
2966 }
2967 
2968 static int receive_uuids(struct drbd_conf *mdev, struct p_header *h)
2969 {
2970 	struct p_uuids *p = (struct p_uuids *)h;
2971 	u64 *p_uuid;
2972 	int i;
2973 
2974 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
2975 	if (drbd_recv(mdev, h->payload, h->length) != h->length)
2976 		return FALSE;
2977 
2978 	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
	if (!p_uuid)
		return FALSE;
2979 
2980 	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
2981 		p_uuid[i] = be64_to_cpu(p->uuid[i]);
2982 
2983 	kfree(mdev->p_uuid);
2984 	mdev->p_uuid = p_uuid;
2985 
2986 	if (mdev->state.conn < C_CONNECTED &&
2987 	    mdev->state.disk < D_INCONSISTENT &&
2988 	    mdev->state.role == R_PRIMARY &&
2989 	    (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
2990 		dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
2991 		    (unsigned long long)mdev->ed_uuid);
2992 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2993 		return FALSE;
2994 	}
2995 
2996 	if (get_ldev(mdev)) {
2997 		int skip_initial_sync =
2998 			mdev->state.conn == C_CONNECTED &&
2999 			mdev->agreed_pro_version >= 90 &&
3000 			mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3001 			(p_uuid[UI_FLAGS] & 8);
3002 		if (skip_initial_sync) {
3003 			dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3004 			drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3005 					"clear_n_write from receive_uuids");
3006 			_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3007 			_drbd_uuid_set(mdev, UI_BITMAP, 0);
3008 			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3009 					CS_VERBOSE, NULL);
3010 			drbd_md_sync(mdev);
3011 		}
3012 		put_ldev(mdev);
3013 	}
3014 
3015 	/* Before we test for the disk state, we should wait until a possibly
3016 	   ongoing cluster wide state change has finished. That is important if
3017 	   we are primary and are detaching from our disk. We need to see the
3018 	   new disk state... */
3019 	wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3020 	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3021 		drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3022 
3023 	return TRUE;
3024 }
3025 
3026 /**
3027  * convert_state() - Converts the peer's view of the cluster state to our point of view
3028  * @ps:		The state as seen by the peer.
3029  */
3030 static union drbd_state convert_state(union drbd_state ps)
3031 {
3032 	union drbd_state ms;
3033 
3034 	static enum drbd_conns c_tab[] = {
3035 		[C_CONNECTED] = C_CONNECTED,
3036 
3037 		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3038 		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3039 		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3040 		[C_VERIFY_S]       = C_VERIFY_T,
3041 		[C_MASK]   = C_MASK,
3042 	};
3043 
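	/* Mirror the peer's view of the cluster: swap role/peer and disk/pdsk,
	 * and translate directional connection states (e.g. StartingSyncS
	 * becomes StartingSyncT) via c_tab. */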
3044 	ms.i = ps.i;
3045 
3046 	ms.conn = c_tab[ps.conn];
3047 	ms.peer = ps.role;
3048 	ms.role = ps.peer;
3049 	ms.pdsk = ps.disk;
3050 	ms.disk = ps.pdsk;
3051 	ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3052 
3053 	return ms;
3054 }
3055 
3056 static int receive_req_state(struct drbd_conf *mdev, struct p_header *h)
3057 {
3058 	struct p_req_state *p = (struct p_req_state *)h;
3059 	union drbd_state mask, val;
3060 	int rv;
3061 
3062 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3063 	if (drbd_recv(mdev, h->payload, h->length) != h->length)
3064 		return FALSE;
3065 
3066 	mask.i = be32_to_cpu(p->mask);
3067 	val.i = be32_to_cpu(p->val);
3068 
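	/* We have a cluster wide state change of our own pending and hold the
	 * DISCARD_CONCURRENT flag: report the concurrent state change to the
	 * peer instead of applying its request. */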
3069 	if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3070 	    test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3071 		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3072 		return TRUE;
3073 	}
3074 
3075 	mask = convert_state(mask);
3076 	val = convert_state(val);
3077 
3078 	rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3079 
3080 	drbd_send_sr_reply(mdev, rv);
3081 	drbd_md_sync(mdev);
3082 
3083 	return TRUE;
3084 }
3085 
3086 static int receive_state(struct drbd_conf *mdev, struct p_header *h)
3087 {
3088 	struct p_state *p = (struct p_state *)h;
3089 	enum drbd_conns nconn, oconn;
3090 	union drbd_state ns, peer_state;
3091 	enum drbd_disk_state real_peer_disk;
3092 	int rv;
3093 
3094 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h)))
3095 		return FALSE;
3096 
3097 	if (drbd_recv(mdev, h->payload, h->length) != h->length)
3098 		return FALSE;
3099 
3100 	peer_state.i = be32_to_cpu(p->state);
3101 
3102 	real_peer_disk = peer_state.disk;
3103 	if (peer_state.disk == D_NEGOTIATING) {
3104 		real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3105 		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3106 	}
3107 
3108 	spin_lock_irq(&mdev->req_lock);
3109  retry:
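	/* Take a snapshot of the connection state; req_lock is dropped below,
	 * and if the state changed in the meantime we jump back here. */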
3110 	oconn = nconn = mdev->state.conn;
3111 	spin_unlock_irq(&mdev->req_lock);
3112 
3113 	if (nconn == C_WF_REPORT_PARAMS)
3114 		nconn = C_CONNECTED;
3115 
3116 	if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3117 	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
3118 		int cr; /* consider resync */
3119 
3120 		/* if we established a new connection */
3121 		cr  = (oconn < C_CONNECTED);
3122 		/* if we had an established connection
3123 		 * and one of the nodes newly attaches a disk */
3124 		cr |= (oconn == C_CONNECTED &&
3125 		       (peer_state.disk == D_NEGOTIATING ||
3126 			mdev->state.disk == D_NEGOTIATING));
3127 		/* if we have both been inconsistent, and the peer has been
3128 		 * forced to be UpToDate with --overwrite-data */
3129 		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3130 		/* if we had been plain connected, and the admin requested to
3131 		 * start a sync by "invalidate" or "invalidate-remote" */
3132 		cr |= (oconn == C_CONNECTED &&
3133 				(peer_state.conn >= C_STARTING_SYNC_S &&
3134 				 peer_state.conn <= C_WF_BITMAP_T));
3135 
3136 		if (cr)
3137 			nconn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3138 
3139 		put_ldev(mdev);
3140 		if (nconn == C_MASK) {
3141 			nconn = C_CONNECTED;
3142 			if (mdev->state.disk == D_NEGOTIATING) {
3143 				drbd_force_state(mdev, NS(disk, D_DISKLESS));
3144 			} else if (peer_state.disk == D_NEGOTIATING) {
3145 				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3146 				peer_state.disk = D_DISKLESS;
3147 				real_peer_disk = D_DISKLESS;
3148 			} else {
3149 				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3150 					return FALSE;
3151 				D_ASSERT(oconn == C_WF_REPORT_PARAMS);
3152 				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3153 				return FALSE;
3154 			}
3155 		}
3156 	}
3157 
3158 	spin_lock_irq(&mdev->req_lock);
3159 	if (mdev->state.conn != oconn)
3160 		goto retry;
3161 	clear_bit(CONSIDER_RESYNC, &mdev->flags);
3162 	ns.i = mdev->state.i;
3163 	ns.conn = nconn;
3164 	ns.peer = peer_state.role;
3165 	ns.pdsk = real_peer_disk;
3166 	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3167 	if ((nconn == C_CONNECTED || nconn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3168 		ns.disk = mdev->new_state_tmp.disk;
3169 
3170 	rv = _drbd_set_state(mdev, ns, CS_VERBOSE | CS_HARD, NULL);
3171 	ns = mdev->state;
3172 	spin_unlock_irq(&mdev->req_lock);
3173 
3174 	if (rv < SS_SUCCESS) {
3175 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3176 		return FALSE;
3177 	}
3178 
3179 	if (oconn > C_WF_REPORT_PARAMS) {
3180 		if (nconn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3181 		    peer_state.disk != D_NEGOTIATING ) {
3182 			/* we want resync, peer has not yet decided to sync... */
3183 			/* Nowadays only used when forcing a node into primary role and
3184 			   setting its disk to UpToDate with that */
3185 			drbd_send_uuids(mdev);
3186 			drbd_send_state(mdev);
3187 		}
3188 	}
3189 
3190 	mdev->net_conf->want_lose = 0;
3191 
3192 	drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3193 
3194 	return TRUE;
3195 }
3196 
3197 static int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h)
3198 {
3199 	struct p_rs_uuid *p = (struct p_rs_uuid *)h;
3200 
3201 	wait_event(mdev->misc_wait,
3202 		   mdev->state.conn == C_WF_SYNC_UUID ||
3203 		   mdev->state.conn < C_CONNECTED ||
3204 		   mdev->state.disk < D_NEGOTIATING);
3205 
3206 	/* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3207 
3208 	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3209 	if (drbd_recv(mdev, h->payload, h->length) != h->length)
3210 		return FALSE;
3211 
3212 	/* Here the _drbd_uuid_ functions are right, current should
3213 	   _not_ be rotated into the history */
3214 	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3215 		_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3216 		_drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3217 
3218 		drbd_start_resync(mdev, C_SYNC_TARGET);
3219 
3220 		put_ldev(mdev);
3221 	} else
3222 		dev_err(DEV, "Ignoring SyncUUID packet!\n");
3223 
3224 	return TRUE;
3225 }
3226 
3227 enum receive_bitmap_ret { OK, DONE, FAILED };
3228 
3229 static enum receive_bitmap_ret
3230 receive_bitmap_plain(struct drbd_conf *mdev, struct p_header *h,
3231 	unsigned long *buffer, struct bm_xfer_ctx *c)
3232 {
3233 	unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3234 	unsigned want = num_words * sizeof(long);
3235 
3236 	if (want != h->length) {
3237 		dev_err(DEV, "%s:want (%u) != h->length (%u)\n", __func__, want, h->length);
3238 		return FAILED;
3239 	}
3240 	if (want == 0)
3241 		return DONE;
3242 	if (drbd_recv(mdev, buffer, want) != want)
3243 		return FAILED;
3244 
3245 	drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3246 
3247 	c->word_offset += num_words;
3248 	c->bit_offset = c->word_offset * BITS_PER_LONG;
3249 	if (c->bit_offset > c->bm_bits)
3250 		c->bit_offset = c->bm_bits;
3251 
3252 	return OK;
3253 }
3254 
3255 static enum receive_bitmap_ret
3256 recv_bm_rle_bits(struct drbd_conf *mdev,
3257 		struct p_compressed_bm *p,
3258 		struct bm_xfer_ctx *c)
3259 {
3260 	struct bitstream bs;
3261 	u64 look_ahead;
3262 	u64 rl;
3263 	u64 tmp;
3264 	unsigned long s = c->bit_offset;
3265 	unsigned long e;
3266 	int len = p->head.length - (sizeof(*p) - sizeof(p->head));
3267 	int toggle = DCBP_get_start(p);
3268 	int have;
3269 	int bits;
3270 
3271 	bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3272 
3273 	bits = bitstream_get_bits(&bs, &look_ahead, 64);
3274 	if (bits < 0)
3275 		return FAILED;
3276 
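	/* Decode alternating VLI encoded run lengths from the bit stream.
	 * Runs with "toggle" set describe ranges of bits to set in the bitmap;
	 * the other runs are skipped. look_ahead is refilled as bits are consumed. */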
3277 	for (have = bits; have > 0; s += rl, toggle = !toggle) {
3278 		bits = vli_decode_bits(&rl, look_ahead);
3279 		if (bits <= 0)
3280 			return FAILED;
3281 
3282 		if (toggle) {
3283 			e = s + rl -1;
3284 			if (e >= c->bm_bits) {
3285 				dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3286 				return FAILED;
3287 			}
3288 			_drbd_bm_set_bits(mdev, s, e);
3289 		}
3290 
3291 		if (have < bits) {
3292 			dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3293 				have, bits, look_ahead,
3294 				(unsigned int)(bs.cur.b - p->code),
3295 				(unsigned int)bs.buf_len);
3296 			return FAILED;
3297 		}
3298 		look_ahead >>= bits;
3299 		have -= bits;
3300 
3301 		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3302 		if (bits < 0)
3303 			return FAILED;
3304 		look_ahead |= tmp << have;
3305 		have += bits;
3306 	}
3307 
3308 	c->bit_offset = s;
3309 	bm_xfer_ctx_bit_to_word_offset(c);
3310 
3311 	return (s == c->bm_bits) ? DONE : OK;
3312 }
3313 
3314 static enum receive_bitmap_ret
3315 decode_bitmap_c(struct drbd_conf *mdev,
3316 		struct p_compressed_bm *p,
3317 		struct bm_xfer_ctx *c)
3318 {
3319 	if (DCBP_get_code(p) == RLE_VLI_Bits)
3320 		return recv_bm_rle_bits(mdev, p, c);
3321 
3322 	/* other variants had been implemented for evaluation,
3323 	 * but have been dropped as this one turned out to be "best"
3324 	 * during all our tests. */
3325 
3326 	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3327 	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3328 	return FAILED;
3329 }
3330 
3331 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3332 		const char *direction, struct bm_xfer_ctx *c)
3333 {
3334 	/* what would it take to transfer it "plaintext" */
3335 	unsigned plain = sizeof(struct p_header) *
3336 		((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3337 		+ c->bm_words * sizeof(long);
3338 	unsigned total = c->bytes[0] + c->bytes[1];
3339 	unsigned r;
3340 
3341 	/* total can not be zero. but just in case: */
3342 	if (total == 0)
3343 		return;
3344 
3345 	/* don't report if not compressed */
3346 	if (total >= plain)
3347 		return;
3348 
3349 	/* total < plain. check for overflow, still */
3350 	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3351 		                    : (1000 * total / plain);
3352 
3353 	if (r > 1000)
3354 		r = 1000;
3355 
3356 	r = 1000 - r;
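	/* r is now the savings in tenths of a percent, e.g. plain = 100000 and
	 * total = 1000 gives r = 1000 - 10 = 990, reported as "99.0%" below. */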
3357 	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3358 	     "total %u; compression: %u.%u%%\n",
3359 			direction,
3360 			c->bytes[1], c->packets[1],
3361 			c->bytes[0], c->packets[0],
3362 			total, r/10, r % 10);
3363 }
3364 
3365 /* Since we are processing the bitfield from lower addresses to higher,
3366    it does not matter whether we process it in 32 bit chunks or 64 bit
3367    chunks, as long as it is little endian. (Understand it as a byte stream,
3368    beginning with the lowest byte...) If we used big endian,
3369    we would need to process it from the highest address to the lowest
3370    in order to be agnostic to the 32 vs 64 bit issue.
3371 
3372    returns 0 on failure, 1 if we successfully received it. */
3373 static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
3374 {
3375 	struct bm_xfer_ctx c;
3376 	void *buffer;
3377 	enum receive_bitmap_ret ret;
3378 	int ok = FALSE;
3379 
3380 	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3381 
3382 	drbd_bm_lock(mdev, "receive bitmap");
3383 
3384 	/* maybe we should use some per thread scratch page,
3385 	 * and allocate that during initial device creation? */
3386 	buffer	 = (unsigned long *) __get_free_page(GFP_NOIO);
3387 	if (!buffer) {
3388 		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3389 		goto out;
3390 	}
3391 
3392 	c = (struct bm_xfer_ctx) {
3393 		.bm_bits = drbd_bm_bits(mdev),
3394 		.bm_words = drbd_bm_words(mdev),
3395 	};
3396 
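	/* Receive bitmap packets (plain or RLE compressed) and merge them into
	 * our bitmap until the transfer is complete or an error occurs. */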
3397 	do {
3398 		if (h->command == P_BITMAP) {
3399 			ret = receive_bitmap_plain(mdev, h, buffer, &c);
3400 		} else if (h->command == P_COMPRESSED_BITMAP) {
3401 			/* MAYBE: sanity check that we speak proto >= 90,
3402 			 * and the feature is enabled! */
3403 			struct p_compressed_bm *p;
3404 
3405 			if (h->length > BM_PACKET_PAYLOAD_BYTES) {
3406 				dev_err(DEV, "ReportCBitmap packet too large\n");
3407 				goto out;
3408 			}
3409 			/* use the page buff */
3410 			p = buffer;
3411 			memcpy(p, h, sizeof(*h));
3412 			if (drbd_recv(mdev, p->head.payload, h->length) != h->length)
3413 				goto out;
3414 			if (p->head.length <= (sizeof(*p) - sizeof(p->head))) {
3415 				dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", p->head.length);
3416 				goto out; /* drop the bitmap lock and free the buffer */
3417 			}
3418 			ret = decode_bitmap_c(mdev, p, &c);
3419 		} else {
3420 			dev_warn(DEV, "receive_bitmap: h->command neither ReportBitMap nor ReportCBitMap (is 0x%x)", h->command);
3421 			goto out;
3422 		}
3423 
3424 		c.packets[h->command == P_BITMAP]++;
3425 		c.bytes[h->command == P_BITMAP] += sizeof(struct p_header) + h->length;
3426 
3427 		if (ret != OK)
3428 			break;
3429 
3430 		if (!drbd_recv_header(mdev, h))
3431 			goto out;
3432 	} while (ret == OK);
3433 	if (ret == FAILED)
3434 		goto out;
3435 
3436 	INFO_bm_xfer_stats(mdev, "receive", &c);
3437 
3438 	if (mdev->state.conn == C_WF_BITMAP_T) {
3439 		ok = !drbd_send_bitmap(mdev);
3440 		if (!ok)
3441 			goto out;
3442 		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3443 		ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3444 		D_ASSERT(ok == SS_SUCCESS);
3445 	} else if (mdev->state.conn != C_WF_BITMAP_S) {
3446 		/* admin may have requested C_DISCONNECTING,
3447 		 * other threads may have noticed network errors */
3448 		dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3449 		    drbd_conn_str(mdev->state.conn));
3450 	}
3451 
3452 	ok = TRUE;
3453  out:
3454 	drbd_bm_unlock(mdev);
3455 	if (ok && mdev->state.conn == C_WF_BITMAP_S)
3456 		drbd_start_resync(mdev, C_SYNC_SOURCE);
3457 	free_page((unsigned long) buffer);
3458 	return ok;
3459 }
3460 
3461 static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
3462 {
3463 	/* TODO zero copy sink :) */
3464 	static char sink[128];
3465 	int size, want, r;
3466 
3467 	dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3468 	     h->command, h->length);
3469 
3470 	size = h->length;
3471 	while (size > 0) {
3472 		want = min_t(int, size, sizeof(sink));
3473 		r = drbd_recv(mdev, sink, want);
3474 		ERR_IF(r <= 0) break;
3475 		size -= r;
3476 	}
3477 	return size == 0;
3478 }
3479 
3480 static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
3481 {
3482 	if (mdev->state.disk >= D_INCONSISTENT)
3483 		drbd_kick_lo(mdev);
3484 
3485 	/* Make sure we've acked all the TCP data associated
3486 	 * with the data requests being unplugged */
3487 	drbd_tcp_quickack(mdev->data.socket);
3488 
3489 	return TRUE;
3490 }
3491 
3492 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *);
3493 
3494 static drbd_cmd_handler_f drbd_default_handler[] = {
3495 	[P_DATA]	    = receive_Data,
3496 	[P_DATA_REPLY]	    = receive_DataReply,
3497 	[P_RS_DATA_REPLY]   = receive_RSDataReply,
3498 	[P_BARRIER]	    = receive_Barrier,
3499 	[P_BITMAP]	    = receive_bitmap,
3500 	[P_COMPRESSED_BITMAP]    = receive_bitmap,
3501 	[P_UNPLUG_REMOTE]   = receive_UnplugRemote,
3502 	[P_DATA_REQUEST]    = receive_DataRequest,
3503 	[P_RS_DATA_REQUEST] = receive_DataRequest,
3504 	[P_SYNC_PARAM]	    = receive_SyncParam,
3505 	[P_SYNC_PARAM89]	   = receive_SyncParam,
3506 	[P_PROTOCOL]        = receive_protocol,
3507 	[P_UUIDS]	    = receive_uuids,
3508 	[P_SIZES]	    = receive_sizes,
3509 	[P_STATE]	    = receive_state,
3510 	[P_STATE_CHG_REQ]   = receive_req_state,
3511 	[P_SYNC_UUID]       = receive_sync_uuid,
3512 	[P_OV_REQUEST]      = receive_DataRequest,
3513 	[P_OV_REPLY]        = receive_DataRequest,
3514 	[P_CSUM_RS_REQUEST]    = receive_DataRequest,
3515 	/* anything missing from this table is in
3516 	 * the asender_tbl, see get_asender_cmd */
3517 	[P_MAX_CMD]	    = NULL,
3518 };
3519 
3520 static drbd_cmd_handler_f *drbd_cmd_handler = drbd_default_handler;
3521 static drbd_cmd_handler_f *drbd_opt_cmd_handler;
3522 
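/*
 * drbdd() - main dispatch loop of the receiver thread.
 *
 * Each iteration reads one packet header and calls the handler indexed by
 * the command code: regular commands come from drbd_cmd_handler[] (the
 * default table above), optional commands between P_MAY_IGNORE and
 * P_MAX_OPT_CMD would come from drbd_opt_cmd_handler[], and anything above
 * P_MAX_OPT_CMD is drained by receive_skip().  A failed header read, a
 * missing handler or a handler returning failure forces the connection
 * into C_PROTOCOL_ERROR and ends the loop.
 */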
3523 static void drbdd(struct drbd_conf *mdev)
3524 {
3525 	drbd_cmd_handler_f handler;
3526 	struct p_header *header = &mdev->data.rbuf.header;
3527 
3528 	while (get_t_state(&mdev->receiver) == Running) {
3529 		drbd_thread_current_set_cpu(mdev);
3530 		if (!drbd_recv_header(mdev, header)) {
3531 			drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3532 			break;
3533 		}
3534 
3535 		if (header->command < P_MAX_CMD)
3536 			handler = drbd_cmd_handler[header->command];
3537 		else if (P_MAY_IGNORE < header->command
3538 		     && header->command < P_MAX_OPT_CMD)
3539 			handler = drbd_opt_cmd_handler[header->command-P_MAY_IGNORE];
3540 		else if (header->command > P_MAX_OPT_CMD)
3541 			handler = receive_skip;
3542 		else
3543 			handler = NULL;
3544 
3545 		if (unlikely(!handler)) {
3546 			dev_err(DEV, "unknown packet type %d, l: %d!\n",
3547 			    header->command, header->length);
3548 			drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3549 			break;
3550 		}
3551 		if (unlikely(!handler(mdev, header))) {
3552 			dev_err(DEV, "error receiving %s, l: %d!\n",
3553 			    cmdname(header->command), header->length);
3554 			drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3555 			break;
3556 		}
3557 	}
3558 }
3559 
3560 static void drbd_fail_pending_reads(struct drbd_conf *mdev)
3561 {
3562 	struct hlist_head *slot;
3563 	struct hlist_node *pos;
3564 	struct hlist_node *tmp;
3565 	struct drbd_request *req;
3566 	int i;
3567 
3568 	/*
3569 	 * Application READ requests
3570 	 */
3571 	spin_lock_irq(&mdev->req_lock);
3572 	for (i = 0; i < APP_R_HSIZE; i++) {
3573 		slot = mdev->app_reads_hash+i;
3574 		hlist_for_each_entry_safe(req, pos, tmp, slot, colision) {
3575 			/* it may (but should not any longer!)
3576 			 * be on the work queue; if that assert triggers,
3577 			 * we need to also grab the
3578 			 * spin_lock_irq(&mdev->data.work.q_lock);
3579 			 * and list_del_init here. */
3580 			D_ASSERT(list_empty(&req->w.list));
3581 			/* It would be nice to complete outside of spinlock.
3582 			 * But this is easier for now. */
3583 			_req_mod(req, connection_lost_while_pending);
3584 		}
3585 	}
3586 	for (i = 0; i < APP_R_HSIZE; i++)
3587 		if (!hlist_empty(mdev->app_reads_hash+i))
3588 			dev_warn(DEV, "ASSERT FAILED: app_reads_hash[%d].first: "
3589 				"%p, should be NULL\n", i, mdev->app_reads_hash[i].first);
3590 
3591 	memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
3592 	spin_unlock_irq(&mdev->req_lock);
3593 }
3594 
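/*
 * Queue a barrier work item and wait for it to be processed.  Since the
 * worker handles its queue in order, returning from here guarantees that
 * every work item queued before the barrier has been processed
 * (w_prev_work_done() simply completes barr.done).
 */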
3595 void drbd_flush_workqueue(struct drbd_conf *mdev)
3596 {
3597 	struct drbd_wq_barrier barr;
3598 
3599 	barr.w.cb = w_prev_work_done;
3600 	init_completion(&barr.done);
3601 	drbd_queue_work(&mdev->data.work, &barr.w);
3602 	wait_for_completion(&barr.done);
3603 }
3604 
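/*
 * drbd_disconnect() - tear down everything that belongs to the current
 * connection, roughly in this order: stop the asender, free the sockets,
 * wait for the active/sync/read epoch entry lists to drain, cancel all
 * resync bookkeeping, flush the worker queue, clear the transfer log
 * (unless I/O is suspended) and fail pending application reads, possibly
 * try to outdate the peer (Primary with a fencing policy), then move to
 * C_UNCONNECTED; if the admin requested C_DISCONNECTING, additionally free
 * the per-connection hash tables, the HMAC transform and net_conf and go
 * C_STANDALONE.
 */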
3605 static void drbd_disconnect(struct drbd_conf *mdev)
3606 {
3607 	enum drbd_fencing_p fp;
3608 	union drbd_state os, ns;
3609 	int rv = SS_UNKNOWN_ERROR;
3610 	unsigned int i;
3611 
3612 	if (mdev->state.conn == C_STANDALONE)
3613 		return;
3614 	if (mdev->state.conn >= C_WF_CONNECTION)
3615 		dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
3616 				drbd_conn_str(mdev->state.conn));
3617 
3618 	/* asender does not clean up anything. it must not interfere, either */
3619 	drbd_thread_stop(&mdev->asender);
3620 
3621 	mutex_lock(&mdev->data.mutex);
3622 	drbd_free_sock(mdev);
3623 	mutex_unlock(&mdev->data.mutex);
3624 
3625 	spin_lock_irq(&mdev->req_lock);
3626 	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3627 	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3628 	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3629 	spin_unlock_irq(&mdev->req_lock);
3630 
3631 	/* We do not have data structures that would allow us to
3632 	 * get the rs_pending_cnt down to 0 again.
3633 	 *  * On C_SYNC_TARGET we do not have any data structures describing
3634 	 *    the pending RSDataRequest's we have sent.
3635 	 *    the pending RSDataRequests we have sent.
3636 	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3637 	 *  And no, it is not the sum of the reference counts in the
3638 	 *  resync_LRU. The resync_LRU tracks the whole operation including
3639 	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
3640 	 *  on the fly. */
3641 	drbd_rs_cancel_all(mdev);
3642 	mdev->rs_total = 0;
3643 	mdev->rs_failed = 0;
3644 	atomic_set(&mdev->rs_pending_cnt, 0);
3645 	wake_up(&mdev->misc_wait);
3646 
3647 	/* make sure syncer is stopped and w_resume_next_sg queued */
3648 	del_timer_sync(&mdev->resync_timer);
3649 	set_bit(STOP_SYNC_TIMER, &mdev->flags);
3650 	resync_timer_fn((unsigned long)mdev);
3651 
3652 	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3653 	 * w_make_resync_request etc. which may still be on the worker queue
3654 	 * to be "canceled" */
3655 	drbd_flush_workqueue(mdev);
3656 
3657 	/* This also does reclaim_net_ee().  If we do this too early, we might
3658 	 * miss some resync ee and pages.*/
3659 	drbd_process_done_ee(mdev);
3660 
3661 	kfree(mdev->p_uuid);
3662 	mdev->p_uuid = NULL;
3663 
3664 	if (!mdev->state.susp)
3665 		tl_clear(mdev);
3666 
3667 	drbd_fail_pending_reads(mdev);
3668 
3669 	dev_info(DEV, "Connection closed\n");
3670 
3671 	drbd_md_sync(mdev);
3672 
3673 	fp = FP_DONT_CARE;
3674 	if (get_ldev(mdev)) {
3675 		fp = mdev->ldev->dc.fencing;
3676 		put_ldev(mdev);
3677 	}
3678 
3679 	if (mdev->state.role == R_PRIMARY) {
3680 		if (fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) {
3681 			enum drbd_disk_state nps = drbd_try_outdate_peer(mdev);
3682 			drbd_request_state(mdev, NS(pdsk, nps));
3683 		}
3684 	}
3685 
3686 	spin_lock_irq(&mdev->req_lock);
3687 	os = mdev->state;
3688 	if (os.conn >= C_UNCONNECTED) {
3689 		/* Do not restart in case we are C_DISCONNECTING */
3690 		ns = os;
3691 		ns.conn = C_UNCONNECTED;
3692 		rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3693 	}
3694 	spin_unlock_irq(&mdev->req_lock);
3695 
3696 	if (os.conn == C_DISCONNECTING) {
3697 		struct hlist_head *h;
3698 		wait_event(mdev->misc_wait, atomic_read(&mdev->net_cnt) == 0);
3699 
3700 		/* we must not free the tl_hash
3701 		 * while application io is still on the fly */
3702 		wait_event(mdev->misc_wait, atomic_read(&mdev->ap_bio_cnt) == 0);
3703 
3704 		spin_lock_irq(&mdev->req_lock);
3705 		/* paranoia code */
3706 		for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3707 			if (h->first)
3708 				dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3709 						(int)(h - mdev->ee_hash), h->first);
3710 		kfree(mdev->ee_hash);
3711 		mdev->ee_hash = NULL;
3712 		mdev->ee_hash_s = 0;
3713 
3714 		/* paranoia code */
3715 		for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3716 			if (h->first)
3717 				dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3718 						(int)(h - mdev->tl_hash), h->first);
3719 		kfree(mdev->tl_hash);
3720 		mdev->tl_hash = NULL;
3721 		mdev->tl_hash_s = 0;
3722 		spin_unlock_irq(&mdev->req_lock);
3723 
3724 		crypto_free_hash(mdev->cram_hmac_tfm);
3725 		mdev->cram_hmac_tfm = NULL;
3726 
3727 		kfree(mdev->net_conf);
3728 		mdev->net_conf = NULL;
3729 		drbd_request_state(mdev, NS(conn, C_STANDALONE));
3730 	}
3731 
3732 	/* tcp_close and release of sendpage pages can be deferred.  I don't
3733 	 * want to use SO_LINGER, because apparently it can be deferred for
3734 	 * more than 20 seconds (longest time I checked).
3735 	 *
3736 	 * Actually we don't care exactly when the network stack does its
3737 	 * put_page(); we just release our reference on these pages right here.
3738 	 */
3739 	i = drbd_release_ee(mdev, &mdev->net_ee);
3740 	if (i)
3741 		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3742 	i = atomic_read(&mdev->pp_in_use);
3743 	if (i)
3744 		dev_info(DEV, "pp_in_use = %u, expected 0\n", i);
3745 
3746 	D_ASSERT(list_empty(&mdev->read_ee));
3747 	D_ASSERT(list_empty(&mdev->active_ee));
3748 	D_ASSERT(list_empty(&mdev->sync_ee));
3749 	D_ASSERT(list_empty(&mdev->done_ee));
3750 
3751 	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3752 	atomic_set(&mdev->current_epoch->epoch_size, 0);
3753 	D_ASSERT(list_empty(&mdev->current_epoch->list));
3754 }
3755 
3756 /*
3757  * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3758  * we can agree on is stored in agreed_pro_version.
3759  *
3760  * feature flags and the reserved array should be enough room for future
3761  * enhancements of the handshake protocol, and possible plugins...
3762  *
3763  * for now, they are expected to be zero, but ignored.
3764  */
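/*
 * Version negotiation, as implemented in drbd_do_handshake() below: each
 * side announces [protocol_min, protocol_max].  If the intervals overlap,
 * we agree on min(PRO_VERSION_MAX, peer's protocol_max); otherwise the
 * dialects are incompatible and the connection is dropped.  A peer that
 * sends protocol_max == 0 is treated as protocol_max == protocol_min.
 */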
3765 static int drbd_send_handshake(struct drbd_conf *mdev)
3766 {
3767 	/* ASSERT current == mdev->receiver ... */
3768 	struct p_handshake *p = &mdev->data.sbuf.handshake;
3769 	int ok;
3770 
3771 	if (mutex_lock_interruptible(&mdev->data.mutex)) {
3772 		dev_err(DEV, "interrupted during initial handshake\n");
3773 		return 0; /* interrupted. not ok. */
3774 	}
3775 
3776 	if (mdev->data.socket == NULL) {
3777 		mutex_unlock(&mdev->data.mutex);
3778 		return 0;
3779 	}
3780 
3781 	memset(p, 0, sizeof(*p));
3782 	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3783 	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3784 	ok = _drbd_send_cmd(mdev, mdev->data.socket, P_HAND_SHAKE,
3785 			    (struct p_header *)p, sizeof(*p), 0);
3786 	mutex_unlock(&mdev->data.mutex);
3787 	return ok;
3788 }
3789 
3790 /*
3791  * return values:
3792  *   1 yes, we have a valid connection
3793  *   0 oops, did not work out, please try again
3794  *  -1 peer talks different language,
3795  *     no point in trying again, please go standalone.
3796  */
3797 static int drbd_do_handshake(struct drbd_conf *mdev)
3798 {
3799 	/* ASSERT current == mdev->receiver ... */
3800 	struct p_handshake *p = &mdev->data.rbuf.handshake;
3801 	const int expect = sizeof(struct p_handshake)
3802 			  -sizeof(struct p_header);
3803 	int rv;
3804 
3805 	rv = drbd_send_handshake(mdev);
3806 	if (!rv)
3807 		return 0;
3808 
3809 	rv = drbd_recv_header(mdev, &p->head);
3810 	if (!rv)
3811 		return 0;
3812 
3813 	if (p->head.command != P_HAND_SHAKE) {
3814 		dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3815 		     cmdname(p->head.command), p->head.command);
3816 		return -1;
3817 	}
3818 
3819 	if (p->head.length != expect) {
3820 		dev_err(DEV, "expected HandShake length: %u, received: %u\n",
3821 		     expect, p->head.length);
3822 		return -1;
3823 	}
3824 
3825 	rv = drbd_recv(mdev, &p->head.payload, expect);
3826 
3827 	if (rv != expect) {
3828 		dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
3829 		return 0;
3830 	}
3831 
3832 	p->protocol_min = be32_to_cpu(p->protocol_min);
3833 	p->protocol_max = be32_to_cpu(p->protocol_max);
3834 	if (p->protocol_max == 0)
3835 		p->protocol_max = p->protocol_min;
3836 
3837 	if (PRO_VERSION_MAX < p->protocol_min ||
3838 	    PRO_VERSION_MIN > p->protocol_max)
3839 		goto incompat;
3840 
3841 	mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3842 
3843 	dev_info(DEV, "Handshake successful: "
3844 	     "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3845 
3846 	return 1;
3847 
3848  incompat:
3849 	dev_err(DEV, "incompatible DRBD dialects: "
3850 	    "I support %d-%d, peer supports %d-%d\n",
3851 	    PRO_VERSION_MIN, PRO_VERSION_MAX,
3852 	    p->protocol_min, p->protocol_max);
3853 	return -1;
3854 }
3855 
3856 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3857 static int drbd_do_auth(struct drbd_conf *mdev)
3858 {
3859 	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
3860 	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
3861 	return -1;
3862 }
3863 #else
3864 #define CHALLENGE_LEN 64
3865 
3866 /* Return value:
3867 	1 - auth succeeded,
3868 	0 - failed, try again (network error),
3869 	-1 - auth failed, don't try again.
3870 */
3871 
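/* CRAM-HMAC style challenge-response, run symmetrically on both nodes:
 *
 *   send P_AUTH_CHALLENGE  (CHALLENGE_LEN random bytes, my_challenge)
 *   recv P_AUTH_CHALLENGE  (the peer's challenge, peers_ch)
 *   send P_AUTH_RESPONSE   (HMAC(shared_secret, peers_ch))
 *   recv P_AUTH_RESPONSE   (the peer's HMAC over my_challenge)
 *   compare it with our own HMAC(shared_secret, my_challenge)
 *
 * cram_hmac_tfm is expected to have been allocated from
 * net_conf->cram_hmac_alg by the connect code before we get here.
 */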
3872 static int drbd_do_auth(struct drbd_conf *mdev)
3873 {
3874 	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
3875 	struct scatterlist sg;
3876 	char *response = NULL;
3877 	char *right_response = NULL;
3878 	char *peers_ch = NULL;
3879 	struct p_header p;
3880 	unsigned int key_len = strlen(mdev->net_conf->shared_secret);
3881 	unsigned int resp_size;
3882 	struct hash_desc desc;
3883 	int rv;
3884 
3885 	desc.tfm = mdev->cram_hmac_tfm;
3886 	desc.flags = 0;
3887 
3888 	rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
3889 				(u8 *)mdev->net_conf->shared_secret, key_len);
3890 	if (rv) {
3891 		dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
3892 		rv = -1;
3893 		goto fail;
3894 	}
3895 
3896 	get_random_bytes(my_challenge, CHALLENGE_LEN);
3897 
3898 	rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
3899 	if (!rv)
3900 		goto fail;
3901 
3902 	rv = drbd_recv_header(mdev, &p);
3903 	if (!rv)
3904 		goto fail;
3905 
3906 	if (p.command != P_AUTH_CHALLENGE) {
3907 		dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
3908 		    cmdname(p.command), p.command);
3909 		rv = 0;
3910 		goto fail;
3911 	}
3912 
3913 	if (p.length > CHALLENGE_LEN*2) {
3914 		dev_err(DEV, "AuthChallenge payload too big.\n");
3915 		rv = -1;
3916 		goto fail;
3917 	}
3918 
3919 	peers_ch = kmalloc(p.length, GFP_NOIO);
3920 	if (peers_ch == NULL) {
3921 		dev_err(DEV, "kmalloc of peers_ch failed\n");
3922 		rv = -1;
3923 		goto fail;
3924 	}
3925 
3926 	rv = drbd_recv(mdev, peers_ch, p.length);
3927 
3928 	if (rv != p.length) {
3929 		dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
3930 		rv = 0;
3931 		goto fail;
3932 	}
3933 
3934 	resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
3935 	response = kmalloc(resp_size, GFP_NOIO);
3936 	if (response == NULL) {
3937 		dev_err(DEV, "kmalloc of response failed\n");
3938 		rv = -1;
3939 		goto fail;
3940 	}
3941 
3942 	sg_init_table(&sg, 1);
3943 	sg_set_buf(&sg, peers_ch, p.length);
3944 
3945 	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
3946 	if (rv) {
3947 		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
3948 		rv = -1;
3949 		goto fail;
3950 	}
3951 
3952 	rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
3953 	if (!rv)
3954 		goto fail;
3955 
3956 	rv = drbd_recv_header(mdev, &p);
3957 	if (!rv)
3958 		goto fail;
3959 
3960 	if (p.command != P_AUTH_RESPONSE) {
3961 		dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
3962 		    cmdname(p.command), p.command);
3963 		rv = 0;
3964 		goto fail;
3965 	}
3966 
3967 	if (p.length != resp_size) {
3968 		dev_err(DEV, "AuthResponse payload has wrong size\n");
3969 		rv = 0;
3970 		goto fail;
3971 	}
3972 
3973 	rv = drbd_recv(mdev, response, resp_size);
3974 
3975 	if (rv != resp_size) {
3976 		dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
3977 		rv = 0;
3978 		goto fail;
3979 	}
3980 
3981 	right_response = kmalloc(resp_size, GFP_NOIO);
3982 	if (right_response == NULL) {
3983 		dev_err(DEV, "kmalloc of right_response failed\n");
3984 		rv = -1;
3985 		goto fail;
3986 	}
3987 
3988 	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
3989 
3990 	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
3991 	if (rv) {
3992 		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
3993 		rv = -1;
3994 		goto fail;
3995 	}
3996 
3997 	rv = !memcmp(response, right_response, resp_size);
3998 
3999 	if (rv)
4000 		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4001 		     resp_size, mdev->net_conf->cram_hmac_alg);
4002 	else
4003 		rv = -1;
4004 
4005  fail:
4006 	kfree(peers_ch);
4007 	kfree(response);
4008 	kfree(right_response);
4009 
4010 	return rv;
4011 }
4012 #endif
4013 
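/*
 * drbdd_init() - entry point of the receiver thread.
 *
 * Retries drbd_connect() once per second as long as it returns 0, gives up
 * and discards the network configuration when it returns -1, and on
 * success runs the drbdd() dispatch loop until the connection is lost,
 * followed by the drbd_disconnect() cleanup.
 */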
4014 int drbdd_init(struct drbd_thread *thi)
4015 {
4016 	struct drbd_conf *mdev = thi->mdev;
4017 	unsigned int minor = mdev_to_minor(mdev);
4018 	int h;
4019 
4020 	sprintf(current->comm, "drbd%d_receiver", minor);
4021 
4022 	dev_info(DEV, "receiver (re)started\n");
4023 
4024 	do {
4025 		h = drbd_connect(mdev);
4026 		if (h == 0) {
4027 			drbd_disconnect(mdev);
4028 			__set_current_state(TASK_INTERRUPTIBLE);
4029 			schedule_timeout(HZ);
4030 		}
4031 		if (h == -1) {
4032 			dev_warn(DEV, "Discarding network configuration.\n");
4033 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4034 		}
4035 	} while (h == 0);
4036 
4037 	if (h > 0) {
4038 		if (get_net_conf(mdev)) {
4039 			drbdd(mdev);
4040 			put_net_conf(mdev);
4041 		}
4042 	}
4043 
4044 	drbd_disconnect(mdev);
4045 
4046 	dev_info(DEV, "receiver terminated\n");
4047 	return 0;
4048 }
4049 
4050 /* ********* acknowledge sender ******** */
4051 
4052 static int got_RqSReply(struct drbd_conf *mdev, struct p_header *h)
4053 {
4054 	struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4055 
4056 	int retcode = be32_to_cpu(p->retcode);
4057 
4058 	if (retcode >= SS_SUCCESS) {
4059 		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4060 	} else {
4061 		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4062 		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4063 		    drbd_set_st_err_str(retcode), retcode);
4064 	}
4065 	wake_up(&mdev->state_wait);
4066 
4067 	return TRUE;
4068 }
4069 
4070 static int got_Ping(struct drbd_conf *mdev, struct p_header *h)
4071 {
4072 	return drbd_send_ping_ack(mdev);
4073 
4074 }
4075 
4076 static int got_PingAck(struct drbd_conf *mdev, struct p_header *h)
4077 {
4078 	/* restore idle timeout */
4079 	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4080 
4081 	return TRUE;
4082 }
4083 
4084 static int got_IsInSync(struct drbd_conf *mdev, struct p_header *h)
4085 {
4086 	struct p_block_ack *p = (struct p_block_ack *)h;
4087 	sector_t sector = be64_to_cpu(p->sector);
4088 	int blksize = be32_to_cpu(p->blksize);
4089 
4090 	D_ASSERT(mdev->agreed_pro_version >= 89);
4091 
4092 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4093 
4094 	drbd_rs_complete_io(mdev, sector);
4095 	drbd_set_in_sync(mdev, sector, blksize);
4096 	/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4097 	mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4098 	dec_rs_pending(mdev);
4099 
4100 	return TRUE;
4101 }
4102 
4103 /* when we receive the ACK for a write request,
4104  * verify that we actually know about it */
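/* The block_id in the ACK is the sender's struct drbd_request pointer,
 * sent with the data packet and echoed back by the peer.  Look it up by
 * pointer identity in the tl_hash slot of the acked sector, and
 * cross-check the sector as a sanity check. */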
4105 static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4106 	u64 id, sector_t sector)
4107 {
4108 	struct hlist_head *slot = tl_hash_slot(mdev, sector);
4109 	struct hlist_node *n;
4110 	struct drbd_request *req;
4111 
4112 	hlist_for_each_entry(req, n, slot, colision) {
4113 		if ((unsigned long)req == (unsigned long)id) {
4114 			if (req->sector != sector) {
4115 				dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4116 				    "wrong sector (%llus versus %llus)\n", req,
4117 				    (unsigned long long)req->sector,
4118 				    (unsigned long long)sector);
4119 				break;
4120 			}
4121 			return req;
4122 		}
4123 	}
4124 	dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
4125 		(void *)(unsigned long)id, (unsigned long long)sector);
4126 	return NULL;
4127 }
4128 
4129 typedef struct drbd_request *(req_validator_fn)
4130 	(struct drbd_conf *mdev, u64 id, sector_t sector);
4131 
4132 static int validate_req_change_req_state(struct drbd_conf *mdev,
4133 	u64 id, sector_t sector, req_validator_fn validator,
4134 	const char *func, enum drbd_req_event what)
4135 {
4136 	struct drbd_request *req;
4137 	struct bio_and_error m;
4138 
4139 	spin_lock_irq(&mdev->req_lock);
4140 	req = validator(mdev, id, sector);
4141 	if (unlikely(!req)) {
4142 		spin_unlock_irq(&mdev->req_lock);
4143 		dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
4144 		return FALSE;
4145 	}
4146 	__req_mod(req, what, &m);
4147 	spin_unlock_irq(&mdev->req_lock);
4148 
4149 	if (m.bio)
4150 		complete_master_bio(mdev, &m);
4151 	return TRUE;
4152 }
4153 
4154 static int got_BlockAck(struct drbd_conf *mdev, struct p_header *h)
4155 {
4156 	struct p_block_ack *p = (struct p_block_ack *)h;
4157 	sector_t sector = be64_to_cpu(p->sector);
4158 	int blksize = be32_to_cpu(p->blksize);
4159 	enum drbd_req_event what;
4160 
4161 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4162 
4163 	if (is_syncer_block_id(p->block_id)) {
4164 		drbd_set_in_sync(mdev, sector, blksize);
4165 		dec_rs_pending(mdev);
4166 		return TRUE;
4167 	}
4168 	switch (be16_to_cpu(h->command)) {
4169 	case P_RS_WRITE_ACK:
4170 		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4171 		what = write_acked_by_peer_and_sis;
4172 		break;
4173 	case P_WRITE_ACK:
4174 		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4175 		what = write_acked_by_peer;
4176 		break;
4177 	case P_RECV_ACK:
4178 		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4179 		what = recv_acked_by_peer;
4180 		break;
4181 	case P_DISCARD_ACK:
4182 		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4183 		what = conflict_discarded_by_peer;
4184 		break;
4185 	default:
4186 		D_ASSERT(0);
4187 		return FALSE;
4188 	}
4189 
4190 	return validate_req_change_req_state(mdev, p->block_id, sector,
4191 		_ack_id_to_req, __func__ , what);
4192 }
4193 
4194 static int got_NegAck(struct drbd_conf *mdev, struct p_header *h)
4195 {
4196 	struct p_block_ack *p = (struct p_block_ack *)h;
4197 	sector_t sector = be64_to_cpu(p->sector);
4198 
4199 	if (__ratelimit(&drbd_ratelimit_state))
4200 		dev_warn(DEV, "Got NegAck packet. Peer is in trouble?\n");
4201 
4202 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4203 
4204 	if (is_syncer_block_id(p->block_id)) {
4205 		int size = be32_to_cpu(p->blksize);
4206 		dec_rs_pending(mdev);
4207 		drbd_rs_failed_io(mdev, sector, size);
4208 		return TRUE;
4209 	}
4210 	return validate_req_change_req_state(mdev, p->block_id, sector,
4211 		_ack_id_to_req, __func__ , neg_acked);
4212 }
4213 
4214 static int got_NegDReply(struct drbd_conf *mdev, struct p_header *h)
4215 {
4216 	struct p_block_ack *p = (struct p_block_ack *)h;
4217 	sector_t sector = be64_to_cpu(p->sector);
4218 
4219 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4220 	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4221 	    (unsigned long long)sector, be32_to_cpu(p->blksize));
4222 
4223 	return validate_req_change_req_state(mdev, p->block_id, sector,
4224 		_ar_id_to_req, __func__ , neg_acked);
4225 }
4226 
4227 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h)
4228 {
4229 	sector_t sector;
4230 	int size;
4231 	struct p_block_ack *p = (struct p_block_ack *)h;
4232 
4233 	sector = be64_to_cpu(p->sector);
4234 	size = be32_to_cpu(p->blksize);
4235 	D_ASSERT(p->block_id == ID_SYNCER);
4236 
4237 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4238 
4239 	dec_rs_pending(mdev);
4240 
4241 	if (get_ldev_if_state(mdev, D_FAILED)) {
4242 		drbd_rs_complete_io(mdev, sector);
4243 		drbd_rs_failed_io(mdev, sector, size);
4244 		put_ldev(mdev);
4245 	}
4246 
4247 	return TRUE;
4248 }
4249 
4250 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header *h)
4251 {
4252 	struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4253 
4254 	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4255 
4256 	return TRUE;
4257 }
4258 
4259 static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
4260 {
4261 	struct p_block_ack *p = (struct p_block_ack *)h;
4262 	struct drbd_work *w;
4263 	sector_t sector;
4264 	int size;
4265 
4266 	sector = be64_to_cpu(p->sector);
4267 	size = be32_to_cpu(p->blksize);
4268 
4269 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4270 
4271 	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4272 		drbd_ov_oos_found(mdev, sector, size);
4273 	else
4274 		ov_oos_print(mdev);
4275 
4276 	drbd_rs_complete_io(mdev, sector);
4277 	dec_rs_pending(mdev);
4278 
4279 	if (--mdev->ov_left == 0) {
4280 		w = kmalloc(sizeof(*w), GFP_NOIO);
4281 		if (w) {
4282 			w->cb = w_ov_finished;
4283 			drbd_queue_work_front(&mdev->data.work, w);
4284 		} else {
4285 			dev_err(DEV, "kmalloc(w) failed.\n");
4286 			ov_oos_print(mdev);
4287 			drbd_resync_finished(mdev);
4288 		}
4289 	}
4290 	return TRUE;
4291 }
4292 
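/*
 * Dispatch table of the asender: pkt_size is the total on-wire packet size
 * (header included) that must have been received before ->process() may be
 * called; see the receive state machine in drbd_asender() below.
 */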
4293 struct asender_cmd {
4294 	size_t pkt_size;
4295 	int (*process)(struct drbd_conf *mdev, struct p_header *h);
4296 };
4297 
4298 static struct asender_cmd *get_asender_cmd(int cmd)
4299 {
4300 	static struct asender_cmd asender_tbl[] = {
4301 		/* anything missing from this table is in
4302 		 * the drbd_cmd_handler (drbd_default_handler) table,
4303 		 * see the beginning of drbdd() */
4304 	[P_PING]	    = { sizeof(struct p_header), got_Ping },
4305 	[P_PING_ACK]	    = { sizeof(struct p_header), got_PingAck },
4306 	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
4307 	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
4308 	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
4309 	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
4310 	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
4311 	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
4312 	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply},
4313 	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
4314 	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
4315 	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4316 	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
4317 	[P_MAX_CMD]	    = { 0, NULL },
4318 	};
4319 	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
4320 		return NULL;
4321 	return &asender_tbl[cmd];
4322 }
4323 
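/*
 * drbd_asender() - the "acknowledge sender" thread.
 *
 * Besides sending pings and flushing the done_ee list (which sends the
 * acks), it runs a small receive state machine on the meta socket: read a
 * full p_header, look the command up in the asender table to learn the
 * total packet size, read the remainder, then call the handler.  Errors
 * force C_NETWORK_FAILURE (reconnect), an unknown command forces
 * C_DISCONNECTING.
 */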
4324 int drbd_asender(struct drbd_thread *thi)
4325 {
4326 	struct drbd_conf *mdev = thi->mdev;
4327 	struct p_header *h = &mdev->meta.rbuf.header;
4328 	struct asender_cmd *cmd = NULL;
4329 
4330 	int rv, len;
4331 	void *buf    = h;
4332 	int received = 0;
4333 	int expect   = sizeof(struct p_header);
4334 	int empty;
4335 
4336 	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
4337 
4338 	current->policy = SCHED_RR;  /* Make this a realtime task! */
4339 	current->rt_priority = 2;    /* more important than all other tasks */
4340 
4341 	while (get_t_state(thi) == Running) {
4342 		drbd_thread_current_set_cpu(mdev);
4343 		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
4344 			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
4345 			mdev->meta.socket->sk->sk_rcvtimeo =
4346 				mdev->net_conf->ping_timeo*HZ/10;
4347 		}
4348 
4349 		/* conditionally cork;
4350 		 * it may hurt latency if we cork without much to send */
4351 		if (!mdev->net_conf->no_cork &&
4352 			3 < atomic_read(&mdev->unacked_cnt))
4353 			drbd_tcp_cork(mdev->meta.socket);
4354 		while (1) {
4355 			clear_bit(SIGNAL_ASENDER, &mdev->flags);
4356 			flush_signals(current);
4357 			if (!drbd_process_done_ee(mdev)) {
4358 				dev_err(DEV, "process_done_ee() = NOT_OK\n");
4359 				goto reconnect;
4360 			}
4361 			/* to avoid race with newly queued ACKs */
4362 			set_bit(SIGNAL_ASENDER, &mdev->flags);
4363 			spin_lock_irq(&mdev->req_lock);
4364 			empty = list_empty(&mdev->done_ee);
4365 			spin_unlock_irq(&mdev->req_lock);
4366 			/* new ack may have been queued right here,
4367 			 * but then there is also a signal pending,
4368 			 * and we start over... */
4369 			if (empty)
4370 				break;
4371 		}
4372 		/* but unconditionally uncork unless disabled */
4373 		if (!mdev->net_conf->no_cork)
4374 			drbd_tcp_uncork(mdev->meta.socket);
4375 
4376 		/* short circuit, recv_msg would return EINTR anyways. */
4377 		if (signal_pending(current))
4378 			continue;
4379 
4380 		rv = drbd_recv_short(mdev, mdev->meta.socket,
4381 				     buf, expect-received, 0);
4382 		clear_bit(SIGNAL_ASENDER, &mdev->flags);
4383 
4384 		flush_signals(current);
4385 
4386 		/* Note:
4387 		 * -EINTR	 (on meta) we got a signal
4388 		 * -EAGAIN	 (on meta) rcvtimeo expired
4389 		 * -ECONNRESET	 other side closed the connection
4390 		 * -ERESTARTSYS  (on data) we got a signal
4391 		 * rv <  0	 other than above: unexpected error!
4392 		 * rv == expected: full header or command
4393 		 * rv <  expected: "woken" by signal during receive
4394 		 * rv == 0	 : "connection shut down by peer"
4395 		 */
4396 		if (likely(rv > 0)) {
4397 			received += rv;
4398 			buf	 += rv;
4399 		} else if (rv == 0) {
4400 			dev_err(DEV, "meta connection shut down by peer.\n");
4401 			goto reconnect;
4402 		} else if (rv == -EAGAIN) {
4403 			if (mdev->meta.socket->sk->sk_rcvtimeo ==
4404 			    mdev->net_conf->ping_timeo*HZ/10) {
4405 				dev_err(DEV, "PingAck did not arrive in time.\n");
4406 				goto reconnect;
4407 			}
4408 			set_bit(SEND_PING, &mdev->flags);
4409 			continue;
4410 		} else if (rv == -EINTR) {
4411 			continue;
4412 		} else {
4413 			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
4414 			goto reconnect;
4415 		}
4416 
4417 		if (received == expect && cmd == NULL) {
4418 			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
4419 				dev_err(DEV, "magic?? on meta m: 0x%lx c: %d l: %d\n",
4420 				    (long)be32_to_cpu(h->magic),
4421 				    h->command, h->length);
4422 				goto reconnect;
4423 			}
4424 			cmd = get_asender_cmd(be16_to_cpu(h->command));
4425 			len = be16_to_cpu(h->length);
4426 			if (unlikely(cmd == NULL)) {
4427 				dev_err(DEV, "unknown command?? on meta m: 0x%lx c: %d l: %d\n",
4428 				    (long)be32_to_cpu(h->magic),
4429 				    h->command, h->length);
4430 				goto disconnect;
4431 			}
4432 			expect = cmd->pkt_size;
4433 			ERR_IF(len != expect-sizeof(struct p_header))
4434 				goto reconnect;
4435 		}
4436 		if (received == expect) {
4437 			D_ASSERT(cmd != NULL);
4438 			if (!cmd->process(mdev, h))
4439 				goto reconnect;
4440 
4441 			buf	 = h;
4442 			received = 0;
4443 			expect	 = sizeof(struct p_header);
4444 			cmd	 = NULL;
4445 		}
4446 	}
4447 
4448 	if (0) {
4449 reconnect:
4450 		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
4451 	}
4452 	if (0) {
4453 disconnect:
4454 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4455 	}
4456 	clear_bit(SIGNAL_ASENDER, &mdev->flags);
4457 
4458 	D_ASSERT(mdev->state.conn < C_CONNECTED);
4459 	dev_info(DEV, "asender terminated\n");
4460 
4461 	return 0;
4462 }
4463