xref: /linux/drivers/block/drbd/drbd_int.h (revision d5b27b01f17ef1f0badc45f9eea521be3457c9cb)
1b411b363SPhilipp Reisner /*
2b411b363SPhilipp Reisner   drbd_int.h
3b411b363SPhilipp Reisner 
4b411b363SPhilipp Reisner   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5b411b363SPhilipp Reisner 
6b411b363SPhilipp Reisner   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7b411b363SPhilipp Reisner   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8b411b363SPhilipp Reisner   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9b411b363SPhilipp Reisner 
10b411b363SPhilipp Reisner   drbd is free software; you can redistribute it and/or modify
11b411b363SPhilipp Reisner   it under the terms of the GNU General Public License as published by
12b411b363SPhilipp Reisner   the Free Software Foundation; either version 2, or (at your option)
13b411b363SPhilipp Reisner   any later version.
14b411b363SPhilipp Reisner 
15b411b363SPhilipp Reisner   drbd is distributed in the hope that it will be useful,
16b411b363SPhilipp Reisner   but WITHOUT ANY WARRANTY; without even the implied warranty of
17b411b363SPhilipp Reisner   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
18b411b363SPhilipp Reisner   GNU General Public License for more details.
19b411b363SPhilipp Reisner 
20b411b363SPhilipp Reisner   You should have received a copy of the GNU General Public License
21b411b363SPhilipp Reisner   along with drbd; see the file COPYING.  If not, write to
22b411b363SPhilipp Reisner   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23b411b363SPhilipp Reisner 
24b411b363SPhilipp Reisner */
25b411b363SPhilipp Reisner 
26b411b363SPhilipp Reisner #ifndef _DRBD_INT_H
27b411b363SPhilipp Reisner #define _DRBD_INT_H
28b411b363SPhilipp Reisner 
29b411b363SPhilipp Reisner #include <linux/compiler.h>
30b411b363SPhilipp Reisner #include <linux/types.h>
31b411b363SPhilipp Reisner #include <linux/version.h>
32b411b363SPhilipp Reisner #include <linux/list.h>
33b411b363SPhilipp Reisner #include <linux/sched.h>
34b411b363SPhilipp Reisner #include <linux/bitops.h>
35b411b363SPhilipp Reisner #include <linux/slab.h>
36b411b363SPhilipp Reisner #include <linux/crypto.h>
37132cc538SRandy Dunlap #include <linux/ratelimit.h>
38b411b363SPhilipp Reisner #include <linux/tcp.h>
39b411b363SPhilipp Reisner #include <linux/mutex.h>
40b411b363SPhilipp Reisner #include <linux/major.h>
41b411b363SPhilipp Reisner #include <linux/blkdev.h>
42b411b363SPhilipp Reisner #include <linux/genhd.h>
43062e879cSPhilipp Reisner #include <linux/idr.h>
44b411b363SPhilipp Reisner #include <net/tcp.h>
45b411b363SPhilipp Reisner #include <linux/lru_cache.h>
4670c71606SPaul Gortmaker #include <linux/prefetch.h>
473b98c0c2SLars Ellenberg #include <linux/drbd_genl_api.h>
48b8907339SPhilipp Reisner #include <linux/drbd.h>
49b8907339SPhilipp Reisner #include "drbd_state.h"
50b411b363SPhilipp Reisner 
#ifdef __CHECKER__
/* Sparse lock-context annotations: declare which lock must be held to
 * access an object (for read, write, or both) and which lock a function
 * must be entered with.  They expand to nothing for a normal compile. */
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
# define __must_hold(x)       __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
# define __must_hold(x)
#endif

/* Run stmt while telling the checker that "lock" is held, without actually
 * taking it -- for paths the static checker cannot follow on its own. */
#define __no_warn(lock, stmt) do { __acquire(lock); stmt; __release(lock); } while (0)
64b411b363SPhilipp Reisner 
65b411b363SPhilipp Reisner /* module parameter, defined in drbd_main.c */
66b411b363SPhilipp Reisner extern unsigned int minor_count;
67b411b363SPhilipp Reisner extern int disable_sendpage;
68b411b363SPhilipp Reisner extern int allow_oos;
69b411b363SPhilipp Reisner 
70b411b363SPhilipp Reisner #ifdef CONFIG_DRBD_FAULT_INJECTION
71b411b363SPhilipp Reisner extern int enable_faults;
72b411b363SPhilipp Reisner extern int fault_rate;
73b411b363SPhilipp Reisner extern int fault_devs;
74b411b363SPhilipp Reisner #endif
75b411b363SPhilipp Reisner 
76b411b363SPhilipp Reisner extern char usermode_helper[];
77b411b363SPhilipp Reisner 
78b411b363SPhilipp Reisner 
79b411b363SPhilipp Reisner /* I don't remember why XCPU ...
80b411b363SPhilipp Reisner  * This is used to wake the asender,
81b411b363SPhilipp Reisner  * and to interrupt sending the sending task
82b411b363SPhilipp Reisner  * on disconnect.
83b411b363SPhilipp Reisner  */
84b411b363SPhilipp Reisner #define DRBD_SIG SIGXCPU
85b411b363SPhilipp Reisner 
86b411b363SPhilipp Reisner /* This is used to stop/restart our threads.
87b411b363SPhilipp Reisner  * Cannot use SIGTERM nor SIGKILL, since these
88b411b363SPhilipp Reisner  * are sent out by init on runlevel changes
89b411b363SPhilipp Reisner  * I choose SIGHUP for now.
90b411b363SPhilipp Reisner  */
91b411b363SPhilipp Reisner #define DRBD_SIGKILL SIGHUP
92b411b363SPhilipp Reisner 
93b411b363SPhilipp Reisner #define ID_IN_SYNC      (4711ULL)
94b411b363SPhilipp Reisner #define ID_OUT_OF_SYNC  (4712ULL)
95b411b363SPhilipp Reisner #define ID_SYNCER (-1ULL)
96579b57edSAndreas Gruenbacher 
974a23f264SPhilipp Reisner #define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
98b411b363SPhilipp Reisner 
99b411b363SPhilipp Reisner struct drbd_conf;
1002111438bSPhilipp Reisner struct drbd_tconn;
101b411b363SPhilipp Reisner 
102b411b363SPhilipp Reisner 
/* to shorten dev_warn(DEV, "msg"); and relatives statements */
#define DEV (disk_to_dev(mdev->vdisk))

/* Connection-level logging helpers: prefix the message with
 * "d-con <name>" so log lines identify the connection (tconn). */
#define conn_printk(LEVEL, TCONN, FMT, ARGS...) \
	printk(LEVEL "d-con %s: " FMT, TCONN->name , ## ARGS)
#define conn_alert(TCONN, FMT, ARGS...)  conn_printk(KERN_ALERT, TCONN, FMT, ## ARGS)
#define conn_crit(TCONN, FMT, ARGS...)   conn_printk(KERN_CRIT, TCONN, FMT, ## ARGS)
#define conn_err(TCONN, FMT, ARGS...)    conn_printk(KERN_ERR, TCONN, FMT, ## ARGS)
#define conn_warn(TCONN, FMT, ARGS...)   conn_printk(KERN_WARNING, TCONN, FMT, ## ARGS)
#define conn_notice(TCONN, FMT, ARGS...) conn_printk(KERN_NOTICE, TCONN, FMT, ## ARGS)
#define conn_info(TCONN, FMT, ARGS...)   conn_printk(KERN_INFO, TCONN, FMT, ## ARGS)
#define conn_dbg(TCONN, FMT, ARGS...)    conn_printk(KERN_DEBUG, TCONN, FMT, ## ARGS)
11560ae4966SPhilipp Reisner 
/* Complain loudly (but keep going) when an internal invariant is violated.
 * Wrapped in do { } while (0) so that a D_ASSERT(...) used as the body of
 * an un-braced "if" cannot capture a following "else" (the dangling-else
 * hazard of a bare "if" statement macro). */
#define D_ASSERT(exp)	do {						\
	if (!(exp))							\
		dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n",		\
			__FILE__, __LINE__);				\
	} while (0)
118b411b363SPhilipp Reisner 
/**
 * expect  -  Make an assertion
 *
 * Unlike the assert macro, this macro returns a boolean result.
 * (GCC statement expression: logs via dev_err() when @exp is false and
 * evaluates to the truth value of @exp, so it can be used in conditions.)
 */
#define expect(exp) ({								\
		bool _bool = (exp);						\
		if (!_bool)							\
			dev_err(DEV, "ASSERTION %s FAILED in %s\n",		\
			        #exp, __func__);				\
		_bool;								\
		})
131b411b363SPhilipp Reisner 
/* Defines to control fault insertion */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/*           read  */
	DRBD_FAULT_RS_WR = 2,	/* resync          */
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,	/* data            */
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
	DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */

	DRBD_FAULT_MAX,		/* number of fault types, not a fault itself */
};
147b411b363SPhilipp Reisner 
148b411b363SPhilipp Reisner extern unsigned int
149b411b363SPhilipp Reisner _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
1500cf9d27eSAndreas Gruenbacher 
/*
 * drbd_insert_fault() - decide whether to inject an artificial fault.
 *
 * Returns non-zero when fault injection is compiled in, globally armed
 * (fault_rate), the given fault @type is enabled in the enable_faults
 * bit mask, and the random fault engine fires for this device.
 * Without CONFIG_DRBD_FAULT_INJECTION this is a constant 0.
 */
static inline int
drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	/* cheap checks first; only consult the fault engine when armed */
	int armed = fault_rate && (enable_faults & (1 << type));

	return armed && _drbd_insert_fault(mdev, type);
#else
	return 0;
#endif
}
161b411b363SPhilipp Reisner 
/* integer division, round _UP_ to the next integer */
/* NOTE: A and B are evaluated more than once -- only pass side-effect
 * free arguments.  Intended for non-negative operands. */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))
166b411b363SPhilipp Reisner 
167b411b363SPhilipp Reisner extern struct ratelimit_state drbd_ratelimit_state;
168c141ebdaSPhilipp Reisner extern struct idr minors; /* RCU, updates: genl_lock() */
169c141ebdaSPhilipp Reisner extern struct list_head drbd_tconns; /* RCU, updates: genl_lock() */
170b411b363SPhilipp Reisner 
/* on the wire */
enum drbd_packet {
	/* receiver (data socket) */
	P_DATA		      = 0x00,
	P_DATA_REPLY	      = 0x01, /* Response to P_DATA_REQUEST */
	P_RS_DATA_REPLY	      = 0x02, /* Response to P_RS_DATA_REQUEST */
	P_BARRIER	      = 0x03,
	P_BITMAP	      = 0x04,
	P_BECOME_SYNC_TARGET  = 0x05,
	P_BECOME_SYNC_SOURCE  = 0x06,
	P_UNPLUG_REMOTE	      = 0x07, /* Used at various times to hint the peer */
	P_DATA_REQUEST	      = 0x08, /* Used to ask for a data block */
	P_RS_DATA_REQUEST     = 0x09, /* Used to ask for a data block for resync */
	P_SYNC_PARAM	      = 0x0a,
	P_PROTOCOL	      = 0x0b,
	P_UUIDS		      = 0x0c,
	P_SIZES		      = 0x0d,
	P_STATE		      = 0x0e,
	P_SYNC_UUID	      = 0x0f,
	P_AUTH_CHALLENGE      = 0x10,
	P_AUTH_RESPONSE	      = 0x11,
	P_STATE_CHG_REQ	      = 0x12,

	/* asender (meta socket) */
	P_PING		      = 0x13,
	P_PING_ACK	      = 0x14,
	P_RECV_ACK	      = 0x15, /* Used in protocol B */
	P_WRITE_ACK	      = 0x16, /* Used in protocol C */
	P_RS_WRITE_ACK	      = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
	P_DISCARD_WRITE	      = 0x18, /* Used in proto C, two-primaries conflict detection */
	P_NEG_ACK	      = 0x19, /* Sent if local disk is unusable */
	P_NEG_DREPLY	      = 0x1a, /* Local disk is broken... */
	P_NEG_RS_DREPLY	      = 0x1b, /* Local disk is broken... */
	P_BARRIER_ACK	      = 0x1c,
	P_STATE_CHG_REPLY     = 0x1d,

	/* "new" commands, no longer fitting into the ordering scheme above */

	P_OV_REQUEST	      = 0x1e, /* data socket */
	P_OV_REPLY	      = 0x1f,
	P_OV_RESULT	      = 0x20, /* meta socket */
	P_CSUM_RS_REQUEST     = 0x21, /* data socket */
	P_RS_IS_IN_SYNC	      = 0x22, /* meta socket */
	P_SYNC_PARAM89	      = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
	P_COMPRESSED_BITMAP   = 0x24, /* compressed or otherwise encoded bitmap transfer */
	/* P_CKPT_FENCE_REQ      = 0x25, * currently reserved for protocol D */
	/* P_CKPT_DISABLE_REQ    = 0x26, * currently reserved for protocol D */
	P_DELAY_PROBE         = 0x27, /* is used on BOTH sockets */
	P_OUT_OF_SYNC         = 0x28, /* Mark as out of sync (Outrunning), data socket */
	P_RS_CANCEL           = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */
	P_CONN_ST_CHG_REQ     = 0x2a, /* data sock: Connection wide state request */
	P_CONN_ST_CHG_REPLY   = 0x2b, /* meta sock: Connection side state req reply */
	P_RETRY_WRITE	      = 0x2c, /* Protocol C: retry conflicting write request */
	P_PROTOCOL_UPDATE     = 0x2d, /* data sock: is used in established connections */

	P_MAY_IGNORE	      = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
	P_MAX_OPT_CMD	      = 0x101,

	/* special command ids for handshake */

	P_INITIAL_META	      = 0xfff1, /* First Packet on the MetaSock */
	P_INITIAL_DATA	      = 0xfff2, /* First Packet on the Socket */

	P_CONNECTION_FEATURES = 0xfffe	/* FIXED for the next century! */
};
236b411b363SPhilipp Reisner 
237d8763023SAndreas Gruenbacher extern const char *cmdname(enum drbd_packet cmd);
238b411b363SPhilipp Reisner 
/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
	/* "const"
	 * stores total bits and long words
	 * of the bitmap, so we don't need to
	 * call the accessor functions over and again. */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* during xfer, current position within the bitmap */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index: (h->command == P_BITMAP) */
	unsigned packets[2];
	unsigned bytes[2];
};
256b411b363SPhilipp Reisner 
257b411b363SPhilipp Reisner extern void INFO_bm_xfer_stats(struct drbd_conf *mdev,
258b411b363SPhilipp Reisner 		const char *direction, struct bm_xfer_ctx *c);
259b411b363SPhilipp Reisner 
260b411b363SPhilipp Reisner static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
261b411b363SPhilipp Reisner {
262b411b363SPhilipp Reisner 	/* word_offset counts "native long words" (32 or 64 bit),
263b411b363SPhilipp Reisner 	 * aligned at 64 bit.
264b411b363SPhilipp Reisner 	 * Encoded packet may end at an unaligned bit offset.
265b411b363SPhilipp Reisner 	 * In case a fallback clear text packet is transmitted in
266b411b363SPhilipp Reisner 	 * between, we adjust this offset back to the last 64bit
267b411b363SPhilipp Reisner 	 * aligned "native long word", which makes coding and decoding
268b411b363SPhilipp Reisner 	 * the plain text bitmap much more convenient.  */
269b411b363SPhilipp Reisner #if BITS_PER_LONG == 64
270b411b363SPhilipp Reisner 	c->word_offset = c->bit_offset >> 6;
271b411b363SPhilipp Reisner #elif BITS_PER_LONG == 32
272b411b363SPhilipp Reisner 	c->word_offset = c->bit_offset >> 5;
273b411b363SPhilipp Reisner 	c->word_offset &= ~(1UL);
274b411b363SPhilipp Reisner #else
275b411b363SPhilipp Reisner # error "unsupported BITS_PER_LONG"
276b411b363SPhilipp Reisner #endif
277b411b363SPhilipp Reisner }
278b411b363SPhilipp Reisner 
279b411b363SPhilipp Reisner #ifndef __packed
280b411b363SPhilipp Reisner #define __packed __attribute__((packed))
281b411b363SPhilipp Reisner #endif
282b411b363SPhilipp Reisner 
283b411b363SPhilipp Reisner /* This is the layout for a packet on the wire.
284b411b363SPhilipp Reisner  * The byteorder is the network byte order.
285b411b363SPhilipp Reisner  *     (except block_id and barrier fields.
286b411b363SPhilipp Reisner  *	these are pointers to local structs
287b411b363SPhilipp Reisner  *	and have no relevance for the partner,
288b411b363SPhilipp Reisner  *	which just echoes them as received.)
289b411b363SPhilipp Reisner  *
290b411b363SPhilipp Reisner  * NOTE that the payload starts at a long aligned offset,
291b411b363SPhilipp Reisner  * regardless of 32 or 64 bit arch!
292b411b363SPhilipp Reisner  */
/* Original (protocol <= 94) on-the-wire packet header. */
struct p_header80 {
	u32	  magic;
	u16	  command;
	u16	  length;	/* bytes of data after this header */
} __packed;

/* Header for big packets, Used for data packets exceeding 64kB */
struct p_header95 {
	u16	  magic;	/* use DRBD_MAGIC_BIG here */
	u16	  command;
	u32	  length;
} __packed;

/* Protocol 100 header; carries a volume number in addition to the command. */
struct p_header100 {
	u32	  magic;
	u16	  volume;
	u16	  command;
	u32	  length;
	u32	  pad;
} __packed;
3130c8e36d9SAndreas Gruenbacher 
31452b061a4SAndreas Gruenbacher extern unsigned int drbd_header_size(struct drbd_tconn *tconn);
31552b061a4SAndreas Gruenbacher 
/* these defines must not be changed without changing the protocol version */
#define DP_HARDBARRIER	      1 /* deprecated */
#define DP_RW_SYNC	      2 /* equals REQ_SYNC    */
#define DP_MAY_SET_IN_SYNC    4
#define DP_UNPLUG             8 /* not used anymore   */
#define DP_FUA               16 /* equals REQ_FUA     */
#define DP_FLUSH             32 /* equals REQ_FLUSH   */
#define DP_DISCARD           64 /* equals REQ_DISCARD */
#define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
#define DP_SEND_WRITE_ACK   256 /* This is a proto C write request */
326b411b363SPhilipp Reisner 
/* Payload header of P_DATA and friends; the actual data follows. */
struct p_data {
	u64	    sector;    /* 64 bits sector number */
	u64	    block_id;  /* to identify the request in protocol B&C */
	u32	    seq_num;
	u32	    dp_flags;  /* DP_* flags defined above */
} __packed;

/*
 * commands which share a struct:
 *  p_block_ack:
 *   P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
 *   P_DISCARD_WRITE (proto C, two-primaries conflict detection)
 *  p_block_req:
 *   P_DATA_REQUEST, P_RS_DATA_REQUEST
 */
struct p_block_ack {
	u64	    sector;
	u64	    block_id;
	u32	    blksize;
	u32	    seq_num;
} __packed;

struct p_block_req {
	u64 sector;
	u64 block_id;
	u32 blksize;
	u32 pad;	/* to multiple of 8 Byte */
} __packed;
355b411b363SPhilipp Reisner 
356b411b363SPhilipp Reisner /*
357b411b363SPhilipp Reisner  * commands with their own struct for additional fields:
3586038178eSAndreas Gruenbacher  *   P_CONNECTION_FEATURES
359b411b363SPhilipp Reisner  *   P_BARRIER
360b411b363SPhilipp Reisner  *   P_BARRIER_ACK
361b411b363SPhilipp Reisner  *   P_SYNC_PARAM
362b411b363SPhilipp Reisner  *   ReportParams
363b411b363SPhilipp Reisner  */
364b411b363SPhilipp Reisner 
/* Handshake packet (P_CONNECTION_FEATURES): advertised protocol range. */
struct p_connection_features {
	u32 protocol_min;
	u32 feature_flags;
	u32 protocol_max;

	/* should be more than enough for future enhancements
	 * for now, feature_flags and the reserved array shall be zero.
	 */

	u32 _pad;
	u64 reserved[7];
} __packed;
377b411b363SPhilipp Reisner 
struct p_barrier {
	u32 barrier;	/* barrier number _handle_ only */
	u32 pad;	/* to multiple of 8 Byte */
} __packed;

struct p_barrier_ack {
	u32 barrier;
	u32 set_size;
} __packed;

struct p_rs_param {
	u32 resync_rate;

	      /* Since protocol version 88 and higher. */
	char verify_alg[0];
} __packed;

struct p_rs_param_89 {
	u32 resync_rate;
        /* protocol version 89: */
	char verify_alg[SHARED_SECRET_MAX];
	char csums_alg[SHARED_SECRET_MAX];
} __packed;

struct p_rs_param_95 {
	u32 resync_rate;
	char verify_alg[SHARED_SECRET_MAX];
	char csums_alg[SHARED_SECRET_MAX];
	u32 c_plan_ahead;
	u32 c_delay_target;
	u32 c_fill_target;
	u32 c_max_rate;
} __packed;

/* bits for p_protocol::conn_flags (name match; verify against senders) */
enum drbd_conn_flags {
	CF_DISCARD_MY_DATA = 1,
	CF_DRY_RUN = 2,
};
416cf14c2e9SPhilipp Reisner 
struct p_protocol {
	u32 protocol;
	u32 after_sb_0p;
	u32 after_sb_1p;
	u32 after_sb_2p;
	u32 conn_flags;
	u32 two_primaries;

              /* Since protocol version 87 and higher. */
	char integrity_alg[0];

} __packed;

struct p_uuids {
	u64 uuid[UI_EXTENDED_SIZE];
} __packed;

struct p_rs_uuid {
	u64	    uuid;
} __packed;
437b411b363SPhilipp Reisner 
struct p_sizes {
	u64	    d_size;  /* size of disk */
	u64	    u_size;  /* user requested size */
	u64	    c_size;  /* current exported size */
	u32	    max_bio_size;  /* Maximal size of a BIO */
	u16	    queue_order_type;  /* not yet implemented in DRBD*/
	u16	    dds_flags; /* use enum dds_flags here. */
} __packed;

struct p_state {
	u32	    state;
} __packed;

struct p_req_state {
	u32	    mask;
	u32	    val;
} __packed;

struct p_req_state_reply {
	u32	    retcode;
} __packed;

/* legacy drbd 0.6 parameter exchange layout */
struct p_drbd06_param {
	u64	  size;
	u32	  state;
	u32	  blksize;
	u32	  protocol;
	u32	  version;
	u32	  gen_cnt[5];
	u32	  bit_map_gen[5];
} __packed;

struct p_discard {
	u64	    block_id;
	u32	    seq_num;
	u32	    pad;
} __packed;

struct p_block_desc {
	u64 sector;
	u32 blksize;
	u32 pad;	/* to multiple of 8 Byte */
} __packed;
48173a01a18SPhilipp Reisner 
/* Valid values for the encoding field.
 * Bump proto version when changing this. */
enum drbd_bitmap_code {
	/* RLE_VLI_Bytes = 0,
	 * and other bit variants had been defined during
	 * algorithm evaluation. */
	RLE_VLI_Bits = 2,
};

struct p_compressed_bm {
	/* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
	 * (encoding & 0x80): polarity (set/unset) of first runlength
	 * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
	 * used to pad up to head.length bytes
	 */
	u8 encoding;

	u8 code[0];
} __packed;

struct p_delay_probe93 {
	u32     seq_num; /* sequence number to match the two probe packets */
	u32     offset;  /* usecs the probe got sent after the reference time point */
} __packed;

/*
 * Bitmap packets need to fit within a single page on the sender and receiver,
 * so we are limited to 4 KiB (and not to PAGE_SIZE, which can be bigger).
 */
#define DRBD_SOCKET_BUFFER_SIZE 4096
512e6ef8a5cSAndreas Gruenbacher 
513b411b363SPhilipp Reisner /**********************************************************************/
/* Life-cycle states of a drbd_thread (stored in drbd_thread::t_state). */
enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};
520b411b363SPhilipp Reisner 
struct drbd_thread {
	spinlock_t t_lock;	/* presumably serializes t_state transitions
				 * (cf. get_t_state() comment) -- verify */
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);	/* thread body */
	struct drbd_tconn *tconn;
	int reset_cpu_mask;
	char name[9];		/* 8 chars + NUL */
};
531b411b363SPhilipp Reisner 
/* Lockless read of the thread state. */
static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 *	--lge */

	smp_rmb();	/* read barrier before the lockless load of t_state */
	return thi->t_state;
}
541b411b363SPhilipp Reisner 
/* Generic work item; cb is invoked with cancel != 0 when the work is
 * being flushed instead of executed -- TODO confirm against callers. */
struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
	union {			/* owner: either a device or a connection */
		struct drbd_conf *mdev;
		struct drbd_tconn *tconn;
	};
};
550b411b363SPhilipp Reisner 
551ace652acSAndreas Gruenbacher #include "drbd_interval.h"
552ace652acSAndreas Gruenbacher 
5537be8da07SAndreas Gruenbacher extern int drbd_wait_misc(struct drbd_conf *, struct drbd_interval *);
5547be8da07SAndreas Gruenbacher 
struct drbd_request {
	struct drbd_work w;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error).
	 * see drbd_request_endio(). */
	struct bio *private_bio;

	struct drbd_interval i;	/* sector interval, see drbd_interval.h */
	unsigned int epoch; /* barrier_nr */

	/* barrier_nr: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * starting a new epoch...
	 */

	struct list_head tl_requests; /* ring list in the transfer log */
	struct bio *master_bio;       /* master bio pointer */
	unsigned long rq_state; /* see comments above _req_mod() */
	unsigned long start_time;
};
577b411b363SPhilipp Reisner 
/* One epoch ("barrier") of the transfer log: the set of write requests
 * issued between two P_BARRIER packets (see tconn->oldest/newest_tle). */
struct drbd_tl_epoch {
	struct drbd_work w;	/* so the barrier itself can be queued as work */
	struct list_head requests; /* requests before */
	struct drbd_tl_epoch *next; /* pointer to the next barrier */
	unsigned int br_number;  /* the barriers identifier. */
	int n_writes;	/* number of requests attached before this barrier */
};
585b411b363SPhilipp Reisner 
/* Receive-side write epoch; linked on tconn, counted under epoch_lock. */
struct drbd_epoch {
	struct drbd_tconn *tconn;	/* owning connection */
	struct list_head list;		/* node in the connection's epoch list */
	unsigned int barrier_nr;	/* valid iff DE_HAVE_BARRIER_NUMBER is set */
	atomic_t epoch_size; /* increased on every request added. */
	atomic_t active;     /* increased on every req. added, and dec on every finished. */
	unsigned long flags; /* DE_* bits below */
};
594b411b363SPhilipp Reisner 
/* drbd_epoch flag bits */
enum {
	DE_HAVE_BARRIER_NUMBER,	/* epoch->barrier_nr has been assigned */
};
599b411b363SPhilipp Reisner 
/* Events fed into the epoch state machine; EV_CLEANUP may be OR'ed in. */
enum epoch_event {
	EV_PUT,			/* drop one reference on the epoch */
	EV_GOT_BARRIER_NR,	/* barrier number received from the peer */
	EV_BECAME_LAST,		/* this epoch is now the most recent one */
	EV_CLEANUP = 32, /* used as flag */
};
606b411b363SPhilipp Reisner 
/* Work item used to flush a work queue: the queuer waits on @done.
 * NOTE(review): completion is presumably signaled by the work callback
 * — confirm at the drbd_flush_workqueue()-style call sites. */
struct drbd_wq_barrier {
	struct drbd_work w;
	struct completion done;
};
611b411b363SPhilipp Reisner 
/* Checksum attached to a peer request when EE_HAS_DIGEST is set. */
struct digest_info {
	int digest_size;	/* bytes at @digest */
	void *digest;		/* the checksum data itself */
};
616b411b363SPhilipp Reisner 
/* A request originating from the peer ("ee"): data to be written locally,
 * or a read/resync request being served.  Lives on the *_ee lists in
 * struct drbd_conf. */
struct drbd_peer_request {
	struct drbd_work w;	/* so completion processing can be queued as work */
	struct drbd_epoch *epoch; /* for writes */
	struct page *pages;	/* payload, allocated from the DRBD page pool */
	atomic_t pending_bios;	/* bios submitted but not yet completed */
	struct drbd_interval i;	/* sector range covered by this request */
	/* see comments on ee flag bits below */
	unsigned long flags;
	union {
		u64 block_id;	/* opaque id echoed back to the peer in ACKs */
		struct digest_info *digest; /* valid iff EE_HAS_DIGEST is set */
	};
};
63045bb912bSLars Ellenberg 
/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
	__EE_CALL_AL_COMPLETE_IO,	/* activity log reference to drop on completion */
	__EE_MAY_SET_IN_SYNC,		/* may mark the covered range in-sync when done */

	/* In case a barrier failed,
	 * we need to resubmit without the barrier flag. */
	__EE_RESUBMITTED,

	/* we may have several bios per peer request.
	 * if any of those fail, we set this flag atomically
	 * from the endio callback */
	__EE_WAS_ERROR,

	/* This ee has a pointer to a digest instead of a block id */
	__EE_HAS_DIGEST,

	/* Conflicting local requests need to be restarted after this request */
	__EE_RESTART_REQUESTS,

	/* The peer wants a write ACK for this (wire proto C) */
	__EE_SEND_WRITE_ACK,

	/* Is set when net_conf had two_primaries set while creating this peer_req */
	__EE_IN_INTERVAL_TREE,
};
/* mask forms of the bit numbers above, for use with ee->flags */
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
#define	EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS	(1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK	(1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE	(1<<__EE_IN_INTERVAL_TREE)
670b411b363SPhilipp Reisner 
/* flag bits per mdev (bit numbers for mdev->flags) */
enum {
	UNPLUG_QUEUED,		/* only relevant with kernel 2.4 */
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CL_ST_CHG_SUCCESS,	/* NOTE(review): presumably the outcome of a
				 * cluster-wide state change — confirm */
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	NO_BARRIER_SUPP,	/* underlying block device doesn't implement barriers */
	CONSIDER_RESYNC,	/* NOTE(review): hint to evaluate whether a resync
				 * is needed — confirm at the state handler */

	MD_NO_FUA,		/* Users wants us to not use FUA/FLUSH on meta data dev */
	SUSPEND_IO,		/* suspend application io */
	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,       /* Started bitmap IO */
	GO_DISKLESS,		/* Disk is being detached, on io-error or admin request. */
	WAS_IO_ERROR,		/* Local disk failed returned IO error */
	RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
	AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
	B_RS_H_DONE,		/* Before resync handler done (already executed) */
	DISCARD_MY_DATA,	/* discard_my_data flag per volume */
	READ_BALANCE_RR,	/* NOTE(review): presumably round-robin read
				 * balancing is enabled — confirm */
};
702b411b363SPhilipp Reisner 
703b411b363SPhilipp Reisner struct drbd_bitmap; /* opaque for drbd_conf */
704b411b363SPhilipp Reisner 
/* definition of bits in bm_flags to be used in drbd_bm_lock
 * and drbd_bitmap_io and friends. */
enum bm_flag {
	/* do we need to kfree, or vfree bm_pages? */
	BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */

	/* currently locked for bulk operation */
	BM_LOCKED_MASK = 0xf,

	/* in detail, that is: */
	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET   = 0x2,
	BM_DONT_TEST  = 0x4,

	/* so we can mark it locked for bulk operation,
	 * and still allow all non-bulk operations */
	BM_IS_LOCKED  = 0x8,

	/* (test bit, count bit) allowed (common case) */
	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	/* testing bits, as well as setting new bits allowed, but clearing bits
	 * would be unexpected.  Used during bitmap receive.  Setting new bits
	 * requires sending of "out-of-sync" information, though. */
	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	/* for drbd_bm_write_copy_pages, everything is allowed,
	 * only concurrent bulk operations are locked out. */
	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};
73520ceb2b2SLars Ellenberg 
/* Simple FIFO of drbd_work items; producers wake waiters via q_wait. */
struct drbd_work_queue {
	struct list_head q;	/* drbd_work.list entries */
	spinlock_t q_lock;  /* to protect the list. */
	wait_queue_head_t q_wait; /* consumer sleeps here while q is empty */
};
741b411b363SPhilipp Reisner 
/* One TCP socket plus pre-allocated packet buffers.
 * NOTE(review): mutex presumably serializes senders on this socket —
 * confirm at the send paths. */
struct drbd_socket {
	struct mutex mutex;
	struct socket    *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	void *sbuf;	/* send buffer */
	void *rbuf;	/* receive buffer */
};
750b411b363SPhilipp Reisner 
/* In-core copy of the on-disk meta data "superblock" of one device.
 * NOTE(review): al_offset/bm_offset appear relative to md_offset —
 * confirm against the meta data layout code. */
struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	u64 uuid[UI_SIZE];	/* generation UUIDs, indexed by UI_* */
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;	/* size of the meta data area, unit sectors */

	s32 al_offset;	/* signed relative sector offset to al area */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* u32 al_nr_extents;	   important for restoring the AL
	 * is stored into  ldev->dc.al_extents, which in turn
	 * gets applied to act_log->nr_elements
	 */
};
768b411b363SPhilipp Reisner 
/* The local backing storage of a device: data device, meta data device,
 * the in-core meta data, and the (RCU-protected) disk configuration. */
struct drbd_backing_dev {
	struct block_device *backing_bdev;	/* holds the actual data */
	struct block_device *md_bdev;		/* holds the meta data (may equal backing_bdev) */
	struct drbd_md md;
	struct disk_conf *disk_conf; /* RCU, for updates: mdev->tconn->conf_update */
	sector_t known_size; /* last known size of that backing device */
};
776b411b363SPhilipp Reisner 
/* Completion record for a synchronous meta data IO operation. */
struct drbd_md_io {
	unsigned int done;	/* set by the endio handler when IO finished */
	int error;		/* IO result code */
};
781b411b363SPhilipp Reisner 
/* A queued bitmap IO operation (see BITMAP_IO/BITMAP_IO_QUEUED flags):
 * io_fn runs once application IO has drained; done reports the result. */
struct bm_io_work {
	struct drbd_work w;
	char *why;		/* human readable reason, for logging */
	enum bm_flag flags;	/* locking mode for the bitmap during io_fn */
	int (*io_fn)(struct drbd_conf *mdev);
	void (*done)(struct drbd_conf *mdev, int rv);
};
789b411b363SPhilipp Reisner 
/* How write-after-write dependencies are enforced on the backing device,
 * strongest method supported is used (see tconn->write_ordering). */
enum write_ordering_e {
	WO_none,	/* no ordering enforced */
	WO_drain_io,	/* drain in-flight IO before dependent writes */
	WO_bdev_flush,	/* use block device flushes */
};
795b411b363SPhilipp Reisner 
/* Ring buffer of resync-planner correction values.
 * Allocated with trailing storage for @size ints; see fifo_alloc(). */
struct fifo_buffer {
	unsigned int head_index;	/* next slot to use */
	unsigned int size;		/* number of elements in values[] */
	int total; /* sum of all values */
	int values[];	/* C99 flexible array member (was GNU zero-length array [0]) */
};
extern struct fifo_buffer *fifo_alloc(int fifo_size);
803778f271dSPhilipp Reisner 
/* flag bits per tconn (bit numbers for tconn->flags) */
enum {
	NET_CONGESTED,		/* The data socket is congested */
	DISCARD_CONCURRENT,	/* Set on one node, cleared on the peer! */
	SEND_PING,		/* whether asender should send a ping asap */
	SIGNAL_ASENDER,		/* whether asender wants to be interrupted */
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, ping_wait gets woken */
	CONN_WD_ST_CHG_REQ,	/* A cluster wide state change on the connection is active */
	CONN_WD_ST_CHG_OKAY,	/* cluster wide state change succeeded */
	CONN_WD_ST_CHG_FAIL,	/* cluster wide state change failed */
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	STATE_SENT,		/* Do not change state/UUIDs while this is set */
};
81801a311a5SPhilipp Reisner 
/* One peer connection, shared by all volumes of a resource: sockets,
 * threads, transfer log, crypto transforms and epoch bookkeeping. */
struct drbd_tconn {			/* is a resource from the config file */
	char *name;			/* Resource name */
	struct list_head all_tconn;	/* linked on global drbd_tconns */
	struct kref kref;
	struct idr volumes;		/* <tconn, vnr> to mdev mapping */
	enum drbd_conns cstate;		/* Only C_STANDALONE to C_WF_REPORT_PARAMS */
	unsigned susp:1;		/* IO suspended by user */
	unsigned susp_nod:1;		/* IO suspended because no data */
	unsigned susp_fen:1;		/* IO suspended because fence peer handler runs */
	struct mutex cstate_mutex;	/* Protects graceful disconnects */

	unsigned long flags;		/* per-tconn flag bits, see enum above */
	struct net_conf *net_conf;	/* content protected by rcu */
	struct mutex conf_update;	/* mutex for ready-copy-update of net_conf and disk_conf */
	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */
	struct res_opts res_opts;

	struct sockaddr_storage my_addr;	/* local endpoint */
	int my_addr_len;
	struct sockaddr_storage peer_addr;	/* remote endpoint */
	int peer_addr_len;

	struct drbd_socket data;	/* data/barrier/cstate/parameter packets */
	struct drbd_socket meta;	/* ping/ack (metadata) packets */
	int agreed_pro_version;		/* actually used protocol version */
	unsigned long last_received;	/* in jiffies, either socket */
	unsigned int ko_count;		/* knock-out counter for dead peer detection */

	spinlock_t req_lock;		/* protects the transfer log below */
	struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
	struct drbd_tl_epoch *newest_tle;
	struct drbd_tl_epoch *oldest_tle;
	struct list_head out_of_sequence_requests;
	struct list_head barrier_acked_requests;

	struct crypto_hash *cram_hmac_tfm;	/* for CRAM-HMAC peer authentication */
	struct crypto_hash *integrity_tfm;  /* checksums we compute, updates protected by tconn->data->mutex */
	struct crypto_hash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread  */
	struct crypto_hash *csums_tfm;		/* for checksum-based resync */
	struct crypto_hash *verify_tfm;		/* for online verify */
	void *int_dig_in;	/* integrity digest scratch buffers */
	void *int_dig_vv;

	struct drbd_epoch *current_epoch;	/* receive-side epoch tracking */
	spinlock_t epoch_lock;
	unsigned int epochs;
	enum write_ordering_e write_ordering;
	atomic_t current_tle_nr;	/* transfer log epoch number */

	unsigned long last_reconnect_jif;
	struct drbd_thread receiver;	/* the three per-connection threads */
	struct drbd_thread worker;
	struct drbd_thread asender;
	cpumask_var_t cpu_mask;		/* CPU affinity for the threads above */
	struct drbd_work_queue sender_work;
};
8752111438bSPhilipp Reisner 
/* Per-volume ("minor device") state: one of these exists for every
 * replicated block device; the shared connection state lives in tconn. */
struct drbd_conf {
	struct drbd_tconn *tconn;	/* the connection this volume belongs to */
	int vnr;			/* volume number within the connection */
	struct kref kref;

	/* things that are stored as / read from meta data on disk */
	unsigned long flags;		/* per-mdev flag bits, see enum above */

	/* configured by drbdsetup */
	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;     /* partner's disk size */
	struct request_queue *rq_queue;
	struct block_device *this_bdev;
	struct gendisk	    *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work  resync_work,	/* pre-allocated work items */
			  unplug_work,
			  go_diskless,
			  md_sync_work,
			  start_resync_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;
#ifdef DRBD_DEBUG_MD_SYNC
	/* records who last marked the meta data dirty, for debugging */
	struct {
		unsigned int line;
		const char* func;
	} last_md_mark_dirty;
#endif

	/* Used after attach while negotiating new disk state. */
	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;  /* upon each state change. */
	unsigned int send_cnt;		/* statistics counters */
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
	atomic_t unacked_cnt;	 /* Need to send replies for */
	atomic_t local_cnt;	 /* Waiting for local completion */

	/* Interval tree of pending local requests */
	struct rb_root read_requests;
	struct rb_root write_requests;

	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of resync blocks that failed in this run */
	unsigned long rs_failed;
	/* Syncer's start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in PausedSyncX state [unit jiffies] */
	unsigned long rs_paused;
	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
	/* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	/* mark's time [unit jiffies] */
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	/* current index into rs_mark_{left,time} */
	int rs_last_mark;

	/* where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	/* where are we now? (sector) */
	sector_t ov_position;
	/* Start sector of out of sync range (to merge printk reporting). */
	sector_t ov_last_oos_start;
	/* size of out-of-sync range in sectors. */
	sector_t ov_last_oos_size;
	unsigned long ov_left; /* in bits */

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

	/* Used to track operations of resync... */
	struct lru_cache *resync;
	/* Number of locked elements in resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;	/* peer's UUIDs, received on connect */

	struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
	struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
	struct list_head done_ee;   /* need to send P_WRITE_ACK */
	struct list_head read_ee;   /* [RS]P_DATA_REQUEST being read */
	struct list_head net_ee;    /* zero-copy network send in progress */

	int next_barrier_nr;
	struct list_head resync_reads;
	atomic_t pp_in_use;		/* allocated from page pool */
	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
	wait_queue_head_t ee_wait;
	struct page *md_io_page;	/* one page buffer for md_io */
	struct drbd_md_io md_io;
	atomic_t md_io_in_use;		/* protects the md_io, md_io_page and md_io_tmpp */
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	int al_tr_pos;   /* position of the next transaction in the journal */
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned int minor;
	unsigned long comm_bm_set; /* communicated number of set bits. */
	struct bm_io_work bm_io_work;
	u64 ed_uuid; /* UUID of the exposed data */
	struct mutex own_state_mutex;
	struct mutex *state_mutex; /* either own_state_mutex or mdev->tconn->cstate_mutex */
	char congestion_reason;  /* Why we were congested... */
	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
	int rs_last_sect_ev; /* counter to compare with */
	int rs_last_events;  /* counter of read or write "events" (unit sectors)
			      * on the lower level device when we last looked. */
	int c_sync_rate; /* current resync rate after syncer throttle magic */
	struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, tconn->conn_update) */
	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
	int peer_max_bio_size;	/* limits negotiated with the peer / local queue */
	int local_max_bio_size;
};
1016b411b363SPhilipp Reisner 
1017b411b363SPhilipp Reisner static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
1018b411b363SPhilipp Reisner {
101981a5d60eSPhilipp Reisner 	return (struct drbd_conf *)idr_find(&minors, minor);
1020b411b363SPhilipp Reisner }
1021b411b363SPhilipp Reisner 
/* Return the device minor number of @mdev (its key in the minors idr,
 * see minor_to_mdev()). */
static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
{
	return mdev->minor;
}
1026b411b363SPhilipp Reisner 
1027eefc2f7dSPhilipp Reisner static inline struct drbd_conf *vnr_to_mdev(struct drbd_tconn *tconn, int vnr)
1028eefc2f7dSPhilipp Reisner {
1029eefc2f7dSPhilipp Reisner 	return (struct drbd_conf *)idr_find(&tconn->volumes, vnr);
1030eefc2f7dSPhilipp Reisner }
1031eefc2f7dSPhilipp Reisner 
1032b411b363SPhilipp Reisner /*
1033b411b363SPhilipp Reisner  * function declarations
1034b411b363SPhilipp Reisner  *************************/
1035b411b363SPhilipp Reisner 
1036b411b363SPhilipp Reisner /* drbd_main.c */
1037b411b363SPhilipp Reisner 
/* Flags modifying device size changes; passed to drbd_send_sizes()
 * (declared below). */
enum dds_flags {
	DDSF_FORCED    = 1,	/* NOTE(review): presumably a user-forced resize — confirm */
	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};
1042e89b591cSPhilipp Reisner 
1043b411b363SPhilipp Reisner extern void drbd_init_set_defaults(struct drbd_conf *mdev);
1044b411b363SPhilipp Reisner extern int  drbd_thread_start(struct drbd_thread *thi);
1045b411b363SPhilipp Reisner extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
1046392c8801SPhilipp Reisner extern char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task);
1047b411b363SPhilipp Reisner #ifdef CONFIG_SMP
104880822284SPhilipp Reisner extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
104980822284SPhilipp Reisner extern void drbd_calc_cpu_mask(struct drbd_tconn *tconn);
1050b411b363SPhilipp Reisner #else
105180822284SPhilipp Reisner #define drbd_thread_current_set_cpu(A) ({})
1052b411b363SPhilipp Reisner #define drbd_calc_cpu_mask(A) ({})
1053b411b363SPhilipp Reisner #endif
10542f5cdd0bSPhilipp Reisner extern void tl_release(struct drbd_tconn *, unsigned int barrier_nr,
1055b411b363SPhilipp Reisner 		       unsigned int set_size);
10562f5cdd0bSPhilipp Reisner extern void tl_clear(struct drbd_tconn *);
10572f5cdd0bSPhilipp Reisner extern void _tl_add_barrier(struct drbd_tconn *, struct drbd_tl_epoch *);
1058360cc740SPhilipp Reisner extern void drbd_free_sock(struct drbd_tconn *tconn);
1059bedbd2a5SPhilipp Reisner extern int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
1060b411b363SPhilipp Reisner 		     void *buf, size_t size, unsigned msg_flags);
1061fb708e40SAndreas Gruenbacher extern int drbd_send_all(struct drbd_tconn *, struct socket *, void *, size_t,
1062fb708e40SAndreas Gruenbacher 			 unsigned);
1063fb708e40SAndreas Gruenbacher 
1064d659f2aaSPhilipp Reisner extern int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd);
1065dc8228d1SPhilipp Reisner extern int drbd_send_protocol(struct drbd_tconn *tconn);
1066b411b363SPhilipp Reisner extern int drbd_send_uuids(struct drbd_conf *mdev);
1067b411b363SPhilipp Reisner extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
10689c1b7f72SAndreas Gruenbacher extern void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
1069e89b591cSPhilipp Reisner extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
107043de7c85SPhilipp Reisner extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s);
107143de7c85SPhilipp Reisner extern int drbd_send_current_state(struct drbd_conf *mdev);
1072f399002eSLars Ellenberg extern int drbd_send_sync_param(struct drbd_conf *mdev);
10739ed57dcbSLars Ellenberg extern void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr,
1074b411b363SPhilipp Reisner 			    u32 set_size);
1075f6ffca9fSAndreas Gruenbacher extern int drbd_send_ack(struct drbd_conf *, enum drbd_packet,
1076f6ffca9fSAndreas Gruenbacher 			 struct drbd_peer_request *);
1077a9a9994dSAndreas Gruenbacher extern void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
1078b411b363SPhilipp Reisner 			     struct p_block_req *rp);
1079a9a9994dSAndreas Gruenbacher extern void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
10802b2bf214SLars Ellenberg 			     struct p_data *dp, int data_size);
1081d8763023SAndreas Gruenbacher extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
1082b411b363SPhilipp Reisner 			    sector_t sector, int blksize, u64 block_id);
10838f7bed77SAndreas Gruenbacher extern int drbd_send_out_of_sync(struct drbd_conf *, struct drbd_request *);
1084f6ffca9fSAndreas Gruenbacher extern int drbd_send_block(struct drbd_conf *, enum drbd_packet,
1085f6ffca9fSAndreas Gruenbacher 			   struct drbd_peer_request *);
1086b411b363SPhilipp Reisner extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
1087b411b363SPhilipp Reisner extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
1088b411b363SPhilipp Reisner 			      sector_t sector, int size, u64 block_id);
1089d8763023SAndreas Gruenbacher extern int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector,
1090d8763023SAndreas Gruenbacher 				   int size, void *digest, int digest_size,
1091d8763023SAndreas Gruenbacher 				   enum drbd_packet cmd);
1092b411b363SPhilipp Reisner extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size);
1093b411b363SPhilipp Reisner 
1094b411b363SPhilipp Reisner extern int drbd_send_bitmap(struct drbd_conf *mdev);
10952f4e7abeSAndreas Gruenbacher extern void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode);
10969f5bdc33SAndreas Gruenbacher extern void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode);
1097b411b363SPhilipp Reisner extern void drbd_free_bc(struct drbd_backing_dev *ldev);
1098b411b363SPhilipp Reisner extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
109962b0da3aSLars Ellenberg void drbd_print_uuids(struct drbd_conf *mdev, const char *text);
1100b411b363SPhilipp Reisner 
1101b411b363SPhilipp Reisner extern void drbd_md_sync(struct drbd_conf *mdev);
1102b411b363SPhilipp Reisner extern int  drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
1103b411b363SPhilipp Reisner extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
1104b411b363SPhilipp Reisner extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
1105b411b363SPhilipp Reisner extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
1106b411b363SPhilipp Reisner extern void _drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
1107b411b363SPhilipp Reisner extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
1108b411b363SPhilipp Reisner extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
1109b411b363SPhilipp Reisner extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local);
1110b411b363SPhilipp Reisner extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
1111ee15b038SLars Ellenberg #ifndef DRBD_DEBUG_MD_SYNC
1112b411b363SPhilipp Reisner extern void drbd_md_mark_dirty(struct drbd_conf *mdev);
1113ee15b038SLars Ellenberg #else
1114ee15b038SLars Ellenberg #define drbd_md_mark_dirty(m)	drbd_md_mark_dirty_(m, __LINE__ , __func__ )
1115ee15b038SLars Ellenberg extern void drbd_md_mark_dirty_(struct drbd_conf *mdev,
1116ee15b038SLars Ellenberg 		unsigned int line, const char *func);
1117ee15b038SLars Ellenberg #endif
1118b411b363SPhilipp Reisner extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
1119b411b363SPhilipp Reisner 				 int (*io_fn)(struct drbd_conf *),
1120b411b363SPhilipp Reisner 				 void (*done)(struct drbd_conf *, int),
112120ceb2b2SLars Ellenberg 				 char *why, enum bm_flag flags);
112220ceb2b2SLars Ellenberg extern int drbd_bitmap_io(struct drbd_conf *mdev,
112320ceb2b2SLars Ellenberg 		int (*io_fn)(struct drbd_conf *),
112420ceb2b2SLars Ellenberg 		char *why, enum bm_flag flags);
1125b411b363SPhilipp Reisner extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
1126b411b363SPhilipp Reisner extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
1127e9e6f3ecSLars Ellenberg extern void drbd_go_diskless(struct drbd_conf *mdev);
112882f59cc6SLars Ellenberg extern void drbd_ldev_destroy(struct drbd_conf *mdev);
1129b411b363SPhilipp Reisner 
1130b411b363SPhilipp Reisner /* Meta data layout
1131b411b363SPhilipp Reisner    We reserve a 128MB Block (4k aligned)
1132b411b363SPhilipp Reisner    * either at the end of the backing device
11333ad2f3fbSDaniel Mack    * or on a separate meta data device. */
1134b411b363SPhilipp Reisner 
1135b411b363SPhilipp Reisner /* The following numbers are sectors */
11367ad651b5SLars Ellenberg /* Allows up to about 3.8TB, so if you want more,
11377ad651b5SLars Ellenberg  * you need to use the "flexible" meta data format. */
11387ad651b5SLars Ellenberg #define MD_RESERVED_SECT (128LU << 11)  /* 128 MB, unit sectors */
1139b411b363SPhilipp Reisner #define MD_AL_OFFSET	8    /* 8 Sectors after start of meta area */
11407ad651b5SLars Ellenberg #define MD_AL_SECTORS	64   /* = 32 kB on disk activity log ring buffer */
11417ad651b5SLars Ellenberg #define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_SECTORS)
1142b411b363SPhilipp Reisner 
11437ad651b5SLars Ellenberg /* we do all meta data IO in 4k blocks */
11447ad651b5SLars Ellenberg #define MD_BLOCK_SHIFT	12
11457ad651b5SLars Ellenberg #define MD_BLOCK_SIZE	(1<<MD_BLOCK_SHIFT)
1146b411b363SPhilipp Reisner 
11477ad651b5SLars Ellenberg /* One activity log extent represents 4M of storage */
11487ad651b5SLars Ellenberg #define AL_EXTENT_SHIFT 22
1149b411b363SPhilipp Reisner #define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
1150b411b363SPhilipp Reisner 
11517ad651b5SLars Ellenberg /* We could make these currently hardcoded constants configurable
11527ad651b5SLars Ellenberg  * variables at create-md time (or even re-configurable at runtime?).
11537ad651b5SLars Ellenberg  * Which will require some more changes to the DRBD "super block"
11547ad651b5SLars Ellenberg  * and attach code.
11557ad651b5SLars Ellenberg  *
11567ad651b5SLars Ellenberg  * updates per transaction:
11577ad651b5SLars Ellenberg  *   This many changes to the active set can be logged with one transaction.
11587ad651b5SLars Ellenberg  *   This number is arbitrary.
11597ad651b5SLars Ellenberg  * context per transaction:
11607ad651b5SLars Ellenberg  *   This many context extent numbers are logged with each transaction.
11617ad651b5SLars Ellenberg  *   This number is resulting from the transaction block size (4k), the layout
11627ad651b5SLars Ellenberg  *   of the transaction header, and the number of updates per transaction.
11637ad651b5SLars Ellenberg  *   See drbd_actlog.c:struct al_transaction_on_disk
11647ad651b5SLars Ellenberg  * */
11657ad651b5SLars Ellenberg #define AL_UPDATES_PER_TRANSACTION	 64	// arbitrary
11667ad651b5SLars Ellenberg #define AL_CONTEXT_PER_TRANSACTION	919	// (4096 - 36 - 6*64)/4
11677ad651b5SLars Ellenberg 
1168b411b363SPhilipp Reisner #if BITS_PER_LONG == 32
1169b411b363SPhilipp Reisner #define LN2_BPL 5
1170b411b363SPhilipp Reisner #define cpu_to_lel(A) cpu_to_le32(A)
1171b411b363SPhilipp Reisner #define lel_to_cpu(A) le32_to_cpu(A)
1172b411b363SPhilipp Reisner #elif BITS_PER_LONG == 64
1173b411b363SPhilipp Reisner #define LN2_BPL 6
1174b411b363SPhilipp Reisner #define cpu_to_lel(A) cpu_to_le64(A)
1175b411b363SPhilipp Reisner #define lel_to_cpu(A) le64_to_cpu(A)
1176b411b363SPhilipp Reisner #else
1177b411b363SPhilipp Reisner #error "LN2 of BITS_PER_LONG unknown!"
1178b411b363SPhilipp Reisner #endif
1179b411b363SPhilipp Reisner 
1180b411b363SPhilipp Reisner /* resync bitmap */
1181b411b363SPhilipp Reisner /* 16MB sized 'bitmap extent' to track syncer usage */
struct bm_extent {
	int rs_left; /* number of bits set (out of sync) in this extent. */
	int rs_failed; /* number of failed resync requests in this extent. */
	unsigned long flags; /* BME_* bit flags, see defines below */
	struct lc_element lce; /* lru_cache bookkeeping for this extent */
};
1188b411b363SPhilipp Reisner 
1189b411b363SPhilipp Reisner #define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
1190b411b363SPhilipp Reisner #define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */
1191e3555d85SPhilipp Reisner #define BME_PRIORITY   2  /* finish resync IO on this extent ASAP! App IO waiting! */
1192b411b363SPhilipp Reisner 
1193b411b363SPhilipp Reisner /* drbd_bitmap.c */
1194b411b363SPhilipp Reisner /*
1195b411b363SPhilipp Reisner  * We need to store one bit for a block.
1196b411b363SPhilipp Reisner  * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
1197b411b363SPhilipp Reisner  * Bit 0 ==> local node thinks this block is binary identical on both nodes
1198b411b363SPhilipp Reisner  * Bit 1 ==> local node thinks this block needs to be synced.
1199b411b363SPhilipp Reisner  */
1200b411b363SPhilipp Reisner 
12018e26f9ccSPhilipp Reisner #define SLEEP_TIME (HZ/10)
12028e26f9ccSPhilipp Reisner 
120345dfffebSLars Ellenberg /* We do bitmap IO in units of 4k blocks.
120445dfffebSLars Ellenberg  * We also still have a hardcoded 4k per bit relation. */
1205b411b363SPhilipp Reisner #define BM_BLOCK_SHIFT	12			 /* 4k per bit */
1206b411b363SPhilipp Reisner #define BM_BLOCK_SIZE	 (1<<BM_BLOCK_SHIFT)
120745dfffebSLars Ellenberg /* mostly arbitrarily set the represented size of one bitmap extent,
120845dfffebSLars Ellenberg  * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
120945dfffebSLars Ellenberg  * at 4k per bit resolution) */
121045dfffebSLars Ellenberg #define BM_EXT_SHIFT	 24	/* 16 MiB per resync extent */
1211b411b363SPhilipp Reisner #define BM_EXT_SIZE	 (1<<BM_EXT_SHIFT)
1212b411b363SPhilipp Reisner 
1213b411b363SPhilipp Reisner #if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
1214b411b363SPhilipp Reisner #error "HAVE YOU FIXED drbdmeta AS WELL??"
1215b411b363SPhilipp Reisner #endif
1216b411b363SPhilipp Reisner 
1217b411b363SPhilipp Reisner /* thus many _storage_ sectors are described by one bit */
1218b411b363SPhilipp Reisner #define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
1219b411b363SPhilipp Reisner #define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
1220b411b363SPhilipp Reisner #define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)
1221b411b363SPhilipp Reisner 
1222b411b363SPhilipp Reisner /* bit to represented kilo byte conversion */
1223b411b363SPhilipp Reisner #define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
1224b411b363SPhilipp Reisner 
1225b411b363SPhilipp Reisner /* in which _bitmap_ extent (resp. sector) the bit for a certain
1226b411b363SPhilipp Reisner  * _storage_ sector is located in */
1227b411b363SPhilipp Reisner #define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))
1228b411b363SPhilipp Reisner 
/* how many _storage_ sectors we have per bitmap extent */
1230b411b363SPhilipp Reisner #define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
1231b411b363SPhilipp Reisner #define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)
1232b411b363SPhilipp Reisner 
1233b411b363SPhilipp Reisner /* in one sector of the bitmap, we have this many activity_log extents. */
1234b411b363SPhilipp Reisner #define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
1235b411b363SPhilipp Reisner #define BM_WORDS_PER_AL_EXT (1 << (AL_EXTENT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
1236b411b363SPhilipp Reisner 
1237b411b363SPhilipp Reisner #define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT)
1238b411b363SPhilipp Reisner #define BM_BLOCKS_PER_BM_EXT_MASK  ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1)
1239b411b363SPhilipp Reisner 
1240b411b363SPhilipp Reisner /* the extent in "PER_EXTENT" below is an activity log extent
1241b411b363SPhilipp Reisner  * we need that many (long words/bytes) to store the bitmap
1242b411b363SPhilipp Reisner  *		     of one AL_EXTENT_SIZE chunk of storage.
1243b411b363SPhilipp Reisner  * we can store the bitmap for that many AL_EXTENTS within
1244b411b363SPhilipp Reisner  * one sector of the _on_disk_ bitmap:
1245b411b363SPhilipp Reisner  * bit	 0	  bit 37   bit 38	     bit (512*8)-1
1246b411b363SPhilipp Reisner  *	     ...|........|........|.. // ..|........|
1247b411b363SPhilipp Reisner  * sect. 0	 `296	  `304			   ^(512*8*8)-1
1248b411b363SPhilipp Reisner  *
1249b411b363SPhilipp Reisner #define BM_WORDS_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
1250b411b363SPhilipp Reisner #define BM_BYTES_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
1251b411b363SPhilipp Reisner #define BM_EXT_PER_SECT	    ( 512 / BM_BYTES_PER_EXTENT )	 //   4
1252b411b363SPhilipp Reisner  */
1253b411b363SPhilipp Reisner 
1254b411b363SPhilipp Reisner #define DRBD_MAX_SECTORS_32 (0xffffffffLU)
1255b411b363SPhilipp Reisner #define DRBD_MAX_SECTORS_BM \
1256b411b363SPhilipp Reisner 	  ((MD_RESERVED_SECT - MD_BM_OFFSET) * (1LL<<(BM_EXT_SHIFT-9)))
1257b411b363SPhilipp Reisner #if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
1258b411b363SPhilipp Reisner #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
1259b411b363SPhilipp Reisner #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
126036bfc7e2SLars Ellenberg #elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
1261b411b363SPhilipp Reisner #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
1262b411b363SPhilipp Reisner #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
1263b411b363SPhilipp Reisner #else
1264b411b363SPhilipp Reisner #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
1265b411b363SPhilipp Reisner /* 16 TB in units of sectors */
1266b411b363SPhilipp Reisner #if BITS_PER_LONG == 32
1267b411b363SPhilipp Reisner /* adjust by one page worth of bitmap,
1268b411b363SPhilipp Reisner  * so we won't wrap around in drbd_bm_find_next_bit.
1269b411b363SPhilipp Reisner  * you should use 64bit OS for that much storage, anyways. */
1270b411b363SPhilipp Reisner #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
1271b411b363SPhilipp Reisner #else
12724b0715f0SLars Ellenberg /* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
12734b0715f0SLars Ellenberg #define DRBD_MAX_SECTORS_FLEX (1UL << 51)
12744b0715f0SLars Ellenberg /* corresponds to (1UL << 38) bits right now. */
1275b411b363SPhilipp Reisner #endif
1276b411b363SPhilipp Reisner #endif
1277b411b363SPhilipp Reisner 
127823361cf3SLars Ellenberg /* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
127923361cf3SLars Ellenberg  * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
128023361cf3SLars Ellenberg  * Since we may live in a mixed-platform cluster,
128123361cf3SLars Ellenberg  * we limit us to a platform agnostic constant here for now.
128223361cf3SLars Ellenberg  * A followup commit may allow even bigger BIO sizes,
128323361cf3SLars Ellenberg  * once we thought that through. */
128423361cf3SLars Ellenberg #define DRBD_MAX_BIO_SIZE (1 << 20)
128523361cf3SLars Ellenberg #if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
128623361cf3SLars Ellenberg #error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
128723361cf3SLars Ellenberg #endif
128899432fccSPhilipp Reisner #define DRBD_MAX_BIO_SIZE_SAFE (1 << 12)       /* Works always = 4k */
1289b411b363SPhilipp Reisner 
12902ffca4f3SPhilipp Reisner #define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* Header 80 only allows packets up to 32KiB data */
12912ffca4f3SPhilipp Reisner #define DRBD_MAX_BIO_SIZE_P95    (1 << 17) /* Protocol 95 to 99 allows bios up to 128KiB */
1292d5373389SPhilipp Reisner 
1293b411b363SPhilipp Reisner extern int  drbd_bm_init(struct drbd_conf *mdev);
129402d9a94bSPhilipp Reisner extern int  drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
1295b411b363SPhilipp Reisner extern void drbd_bm_cleanup(struct drbd_conf *mdev);
1296b411b363SPhilipp Reisner extern void drbd_bm_set_all(struct drbd_conf *mdev);
1297b411b363SPhilipp Reisner extern void drbd_bm_clear_all(struct drbd_conf *mdev);
12984b0715f0SLars Ellenberg /* set/clear/test only a few bits at a time */
1299b411b363SPhilipp Reisner extern int  drbd_bm_set_bits(
1300b411b363SPhilipp Reisner 		struct drbd_conf *mdev, unsigned long s, unsigned long e);
1301b411b363SPhilipp Reisner extern int  drbd_bm_clear_bits(
1302b411b363SPhilipp Reisner 		struct drbd_conf *mdev, unsigned long s, unsigned long e);
13034b0715f0SLars Ellenberg extern int drbd_bm_count_bits(
13044b0715f0SLars Ellenberg 	struct drbd_conf *mdev, const unsigned long s, const unsigned long e);
13054b0715f0SLars Ellenberg /* bm_set_bits variant for use while holding drbd_bm_lock,
13064b0715f0SLars Ellenberg  * may process the whole bitmap in one go */
1307b411b363SPhilipp Reisner extern void _drbd_bm_set_bits(struct drbd_conf *mdev,
1308b411b363SPhilipp Reisner 		const unsigned long s, const unsigned long e);
1309b411b363SPhilipp Reisner extern int  drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
1310b411b363SPhilipp Reisner extern int  drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
131119f843aaSLars Ellenberg extern int  drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
1312b411b363SPhilipp Reisner extern int  drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
131345dfffebSLars Ellenberg extern void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr);
1314b411b363SPhilipp Reisner extern int  drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
131545dfffebSLars Ellenberg extern int  drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local);
1316a220d291SLars Ellenberg extern int  drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
1317b411b363SPhilipp Reisner extern size_t	     drbd_bm_words(struct drbd_conf *mdev);
1318b411b363SPhilipp Reisner extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
1319b411b363SPhilipp Reisner extern sector_t      drbd_bm_capacity(struct drbd_conf *mdev);
13204b0715f0SLars Ellenberg 
13214b0715f0SLars Ellenberg #define DRBD_END_OF_BITMAP	(~(unsigned long)0)
1322b411b363SPhilipp Reisner extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
1323b411b363SPhilipp Reisner /* bm_find_next variants for use while you hold drbd_bm_lock() */
1324b411b363SPhilipp Reisner extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
1325b411b363SPhilipp Reisner extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo);
13260778286aSPhilipp Reisner extern unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev);
1327b411b363SPhilipp Reisner extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev);
1328b411b363SPhilipp Reisner extern int drbd_bm_rs_done(struct drbd_conf *mdev);
1329b411b363SPhilipp Reisner /* for receive_bitmap */
1330b411b363SPhilipp Reisner extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset,
1331b411b363SPhilipp Reisner 		size_t number, unsigned long *buffer);
133219f843aaSLars Ellenberg /* for _drbd_send_bitmap */
1333b411b363SPhilipp Reisner extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset,
1334b411b363SPhilipp Reisner 		size_t number, unsigned long *buffer);
1335b411b363SPhilipp Reisner 
133620ceb2b2SLars Ellenberg extern void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags);
1337b411b363SPhilipp Reisner extern void drbd_bm_unlock(struct drbd_conf *mdev);
1338b411b363SPhilipp Reisner /* drbd_main.c */
1339b411b363SPhilipp Reisner 
1340b411b363SPhilipp Reisner extern struct kmem_cache *drbd_request_cache;
13416c852becSAndreas Gruenbacher extern struct kmem_cache *drbd_ee_cache;	/* peer requests */
1342b411b363SPhilipp Reisner extern struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
1343b411b363SPhilipp Reisner extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
1344b411b363SPhilipp Reisner extern mempool_t *drbd_request_mempool;
1345b411b363SPhilipp Reisner extern mempool_t *drbd_ee_mempool;
1346b411b363SPhilipp Reisner 
134735abf594SLars Ellenberg /* drbd's page pool, used to buffer data received from the peer,
134835abf594SLars Ellenberg  * or data requested by the peer.
134935abf594SLars Ellenberg  *
135035abf594SLars Ellenberg  * This does not have an emergency reserve.
135135abf594SLars Ellenberg  *
135235abf594SLars Ellenberg  * When allocating from this pool, it first takes pages from the pool.
 * Only if the pool is depleted will it try to allocate from the system.
135435abf594SLars Ellenberg  *
135535abf594SLars Ellenberg  * The assumption is that pages taken from this pool will be processed,
135635abf594SLars Ellenberg  * and given back, "quickly", and then can be recycled, so we can avoid
135735abf594SLars Ellenberg  * frequent calls to alloc_page(), and still will be able to make progress even
135835abf594SLars Ellenberg  * under memory pressure.
135935abf594SLars Ellenberg  */
136035abf594SLars Ellenberg extern struct page *drbd_pp_pool;
1361b411b363SPhilipp Reisner extern spinlock_t   drbd_pp_lock;
1362b411b363SPhilipp Reisner extern int	    drbd_pp_vacant;
1363b411b363SPhilipp Reisner extern wait_queue_head_t drbd_pp_wait;
1364b411b363SPhilipp Reisner 
136535abf594SLars Ellenberg /* We also need a standard (emergency-reserve backed) page pool
136635abf594SLars Ellenberg  * for meta data IO (activity log, bitmap).
136735abf594SLars Ellenberg  * We can keep it global, as long as it is used as "N pages at a time".
136835abf594SLars Ellenberg  * 128 should be plenty, currently we probably can get away with as few as 1.
136935abf594SLars Ellenberg  */
137035abf594SLars Ellenberg #define DRBD_MIN_POOL_PAGES	128
137135abf594SLars Ellenberg extern mempool_t *drbd_md_io_page_pool;
137235abf594SLars Ellenberg 
1373da4a75d2SLars Ellenberg /* We also need to make sure we get a bio
1374da4a75d2SLars Ellenberg  * when we need it for housekeeping purposes */
1375da4a75d2SLars Ellenberg extern struct bio_set *drbd_md_io_bio_set;
1376da4a75d2SLars Ellenberg /* to allocate from that set */
1377da4a75d2SLars Ellenberg extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
1378da4a75d2SLars Ellenberg 
1379b411b363SPhilipp Reisner extern rwlock_t global_state_lock;
1380b411b363SPhilipp Reisner 
138180883197SPhilipp Reisner extern int conn_lowest_minor(struct drbd_tconn *tconn);
1382774b3055SPhilipp Reisner enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr);
138381fa2e67SPhilipp Reisner extern void drbd_minor_destroy(struct kref *kref);
1384b411b363SPhilipp Reisner 
1385afbbfa88SAndreas Gruenbacher extern int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts);
1386afbbfa88SAndreas Gruenbacher extern struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts);
13879dc9fbb3SPhilipp Reisner extern void conn_destroy(struct kref *kref);
13880ace9dfaSPhilipp Reisner struct drbd_tconn *conn_get_by_name(const char *name);
1389089c075dSAndreas Gruenbacher extern struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
1390089c075dSAndreas Gruenbacher 					    void *peer_addr, int peer_addr_len);
139191fd4dadSPhilipp Reisner extern void conn_free_crypto(struct drbd_tconn *tconn);
13922111438bSPhilipp Reisner 
1393b411b363SPhilipp Reisner extern int proc_details;
1394b411b363SPhilipp Reisner 
1395b411b363SPhilipp Reisner /* drbd_req */
13967be8da07SAndreas Gruenbacher extern int __drbd_make_request(struct drbd_conf *, struct bio *, unsigned long);
13972f58dcfcSAndreas Gruenbacher extern int drbd_make_request(struct request_queue *q, struct bio *bio);
1398b411b363SPhilipp Reisner extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
1399b411b363SPhilipp Reisner extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
1400b411b363SPhilipp Reisner extern int is_valid_ar_handle(struct drbd_request *, sector_t);
1401b411b363SPhilipp Reisner 
1402b411b363SPhilipp Reisner 
1403b411b363SPhilipp Reisner /* drbd_nl.c */
14048432b314SLars Ellenberg extern int drbd_msg_put_info(const char *info);
1405b411b363SPhilipp Reisner extern void drbd_suspend_io(struct drbd_conf *mdev);
1406b411b363SPhilipp Reisner extern void drbd_resume_io(struct drbd_conf *mdev);
1407b411b363SPhilipp Reisner extern char *ppsize(char *buf, unsigned long long size);
1408ef5e44a6SPhilipp Reisner extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, sector_t, int);
1409b411b363SPhilipp Reisner enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
141024c4830cSBart Van Assche extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
1411b411b363SPhilipp Reisner extern void resync_after_online_grow(struct drbd_conf *);
141299432fccSPhilipp Reisner extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev);
1413bf885f8aSAndreas Gruenbacher extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
1414bf885f8aSAndreas Gruenbacher 					enum drbd_role new_role,
1415b411b363SPhilipp Reisner 					int force);
1416cb703454SPhilipp Reisner extern bool conn_try_outdate_peer(struct drbd_tconn *tconn);
1417cb703454SPhilipp Reisner extern void conn_try_outdate_peer_async(struct drbd_tconn *tconn);
1418b411b363SPhilipp Reisner extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);
1419b411b363SPhilipp Reisner 
1420b411b363SPhilipp Reisner /* drbd_worker.c */
1421b411b363SPhilipp Reisner extern int drbd_worker(struct drbd_thread *thi);
142295f8efd0SAndreas Gruenbacher enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor);
142395f8efd0SAndreas Gruenbacher void drbd_resync_after_changed(struct drbd_conf *mdev);
1424b411b363SPhilipp Reisner extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side);
1425b411b363SPhilipp Reisner extern void resume_next_sg(struct drbd_conf *mdev);
1426b411b363SPhilipp Reisner extern void suspend_other_sg(struct drbd_conf *mdev);
1427b411b363SPhilipp Reisner extern int drbd_resync_finished(struct drbd_conf *mdev);
1428b411b363SPhilipp Reisner /* maybe rather drbd_main.c ? */
1429cdfda633SPhilipp Reisner extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
1430cdfda633SPhilipp Reisner extern void drbd_md_put_buffer(struct drbd_conf *mdev);
1431b411b363SPhilipp Reisner extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
1432b411b363SPhilipp Reisner 		struct drbd_backing_dev *bdev, sector_t sector, int rw);
14338f7bed77SAndreas Gruenbacher extern void drbd_ov_out_of_sync_found(struct drbd_conf *, sector_t, int);
143432db80f6SPhilipp Reisner extern void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
143532db80f6SPhilipp Reisner 					    unsigned int *done);
14369bd28d3cSLars Ellenberg extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
1437b411b363SPhilipp Reisner 
14388f7bed77SAndreas Gruenbacher static inline void ov_out_of_sync_print(struct drbd_conf *mdev)
1439b411b363SPhilipp Reisner {
1440b411b363SPhilipp Reisner 	if (mdev->ov_last_oos_size) {
1441b411b363SPhilipp Reisner 		dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n",
1442b411b363SPhilipp Reisner 		     (unsigned long long)mdev->ov_last_oos_start,
1443b411b363SPhilipp Reisner 		     (unsigned long)mdev->ov_last_oos_size);
1444b411b363SPhilipp Reisner 	}
1445b411b363SPhilipp Reisner 	mdev->ov_last_oos_size=0;
1446b411b363SPhilipp Reisner }
1447b411b363SPhilipp Reisner 
1448b411b363SPhilipp Reisner 
144945bb912bSLars Ellenberg extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
1450f6ffca9fSAndreas Gruenbacher extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *,
1451f6ffca9fSAndreas Gruenbacher 			 struct drbd_peer_request *, void *);
1452b411b363SPhilipp Reisner /* worker callbacks */
145399920dc5SAndreas Gruenbacher extern int w_e_end_data_req(struct drbd_work *, int);
145499920dc5SAndreas Gruenbacher extern int w_e_end_rsdata_req(struct drbd_work *, int);
145599920dc5SAndreas Gruenbacher extern int w_e_end_csum_rs_req(struct drbd_work *, int);
145699920dc5SAndreas Gruenbacher extern int w_e_end_ov_reply(struct drbd_work *, int);
145799920dc5SAndreas Gruenbacher extern int w_e_end_ov_req(struct drbd_work *, int);
145899920dc5SAndreas Gruenbacher extern int w_ov_finished(struct drbd_work *, int);
145999920dc5SAndreas Gruenbacher extern int w_resync_timer(struct drbd_work *, int);
146099920dc5SAndreas Gruenbacher extern int w_send_write_hint(struct drbd_work *, int);
146199920dc5SAndreas Gruenbacher extern int w_make_resync_request(struct drbd_work *, int);
146299920dc5SAndreas Gruenbacher extern int w_send_dblock(struct drbd_work *, int);
146399920dc5SAndreas Gruenbacher extern int w_send_barrier(struct drbd_work *, int);
146499920dc5SAndreas Gruenbacher extern int w_send_read_req(struct drbd_work *, int);
146599920dc5SAndreas Gruenbacher extern int w_prev_work_done(struct drbd_work *, int);
146699920dc5SAndreas Gruenbacher extern int w_e_reissue(struct drbd_work *, int);
146799920dc5SAndreas Gruenbacher extern int w_restart_disk_io(struct drbd_work *, int);
14688f7bed77SAndreas Gruenbacher extern int w_send_out_of_sync(struct drbd_work *, int);
146999920dc5SAndreas Gruenbacher extern int w_start_resync(struct drbd_work *, int);
1470b411b363SPhilipp Reisner 
1471b411b363SPhilipp Reisner extern void resync_timer_fn(unsigned long data);
1472370a43e7SPhilipp Reisner extern void start_resync_timer_fn(unsigned long data);
1473b411b363SPhilipp Reisner 
1474b411b363SPhilipp Reisner /* drbd_receiver.c */
1475e3555d85SPhilipp Reisner extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector);
1476fbe29decSAndreas Gruenbacher extern int drbd_submit_peer_request(struct drbd_conf *,
1477fbe29decSAndreas Gruenbacher 				    struct drbd_peer_request *, const unsigned,
1478fbe29decSAndreas Gruenbacher 				    const int);
14797721f567SAndreas Gruenbacher extern int drbd_free_peer_reqs(struct drbd_conf *, struct list_head *);
14800db55363SAndreas Gruenbacher extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_conf *, u64,
14810db55363SAndreas Gruenbacher 						     sector_t, unsigned int,
1482f6ffca9fSAndreas Gruenbacher 						     gfp_t) __must_hold(local);
14833967deb1SAndreas Gruenbacher extern void __drbd_free_peer_req(struct drbd_conf *, struct drbd_peer_request *,
1484f6ffca9fSAndreas Gruenbacher 				 int);
14853967deb1SAndreas Gruenbacher #define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
14863967deb1SAndreas Gruenbacher #define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
1487c37c8ecfSAndreas Gruenbacher extern struct page *drbd_alloc_pages(struct drbd_conf *, unsigned int, bool);
1488b411b363SPhilipp Reisner extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
1489b411b363SPhilipp Reisner extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
14900e29d163SPhilipp Reisner extern void conn_flush_workqueue(struct drbd_tconn *tconn);
1491c141ebdaSPhilipp Reisner extern int drbd_connected(struct drbd_conf *mdev);
/* Convenience wrapper: flush the work queue of the connection (tconn)
 * this device belongs to. */
static inline void drbd_flush_workqueue(struct drbd_conf *mdev)
{
	conn_flush_workqueue(mdev->tconn);
}
1496b411b363SPhilipp Reisner 
/* Yes, there is kernel_setsockopt, but only since 2.6.18.
 * So we have our own copy of it here.
 *
 * setsockopt() expects a __user pointer, but we call it with a kernel
 * buffer, so we temporarily widen the address limit with
 * set_fs(KERNEL_DS) and restore it afterwards.  The __force cast only
 * silences sparse about the address-space change. */
static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
				  char *optval, int optlen)
{
	mm_segment_t oldfs = get_fs();	/* remember current address limit */
	char __user *uoptval;
	int err;

	uoptval = (char __user __force *)optval;

	set_fs(KERNEL_DS);
	if (level == SOL_SOCKET)
		/* socket-level options are handled by the core directly */
		err = sock_setsockopt(sock, level, optname, uoptval, optlen);
	else
		/* protocol-level options (e.g. SOL_TCP) go through the ops */
		err = sock->ops->setsockopt(sock, level, optname, uoptval,
					    optlen);
	set_fs(oldfs);	/* always restore the previous address limit */
	return err;
}
1517b411b363SPhilipp Reisner 
1518b411b363SPhilipp Reisner static inline void drbd_tcp_cork(struct socket *sock)
1519b411b363SPhilipp Reisner {
1520ed439848SLars Ellenberg 	int val = 1;
1521b411b363SPhilipp Reisner 	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
1522ed439848SLars Ellenberg 			(char*)&val, sizeof(val));
1523b411b363SPhilipp Reisner }
1524b411b363SPhilipp Reisner 
1525b411b363SPhilipp Reisner static inline void drbd_tcp_uncork(struct socket *sock)
1526b411b363SPhilipp Reisner {
1527ed439848SLars Ellenberg 	int val = 0;
1528b411b363SPhilipp Reisner 	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
1529ed439848SLars Ellenberg 			(char*)&val, sizeof(val));
1530b411b363SPhilipp Reisner }
1531b411b363SPhilipp Reisner 
1532b411b363SPhilipp Reisner static inline void drbd_tcp_nodelay(struct socket *sock)
1533b411b363SPhilipp Reisner {
1534ed439848SLars Ellenberg 	int val = 1;
1535b411b363SPhilipp Reisner 	(void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
1536ed439848SLars Ellenberg 			(char*)&val, sizeof(val));
1537b411b363SPhilipp Reisner }
1538b411b363SPhilipp Reisner 
1539b411b363SPhilipp Reisner static inline void drbd_tcp_quickack(struct socket *sock)
1540b411b363SPhilipp Reisner {
1541ed439848SLars Ellenberg 	int val = 2;
1542b411b363SPhilipp Reisner 	(void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
1543ed439848SLars Ellenberg 			(char*)&val, sizeof(val));
1544b411b363SPhilipp Reisner }
1545b411b363SPhilipp Reisner 
15464b0007c0SPhilipp Reisner void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo);
1547b411b363SPhilipp Reisner 
1548b411b363SPhilipp Reisner /* drbd_proc.c */
1549b411b363SPhilipp Reisner extern struct proc_dir_entry *drbd_proc;
15507d4e9d09SEmese Revfy extern const struct file_operations drbd_proc_fops;
1551b411b363SPhilipp Reisner extern const char *drbd_conn_str(enum drbd_conns s);
1552b411b363SPhilipp Reisner extern const char *drbd_role_str(enum drbd_role s);
1553b411b363SPhilipp Reisner 
1554b411b363SPhilipp Reisner /* drbd_actlog.c */
1555181286adSLars Ellenberg extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i);
1556181286adSLars Ellenberg extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i);
1557b411b363SPhilipp Reisner extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
1558b411b363SPhilipp Reisner extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
1559b411b363SPhilipp Reisner extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
1560b411b363SPhilipp Reisner extern void drbd_rs_cancel_all(struct drbd_conf *mdev);
1561b411b363SPhilipp Reisner extern int drbd_rs_del_all(struct drbd_conf *mdev);
1562b411b363SPhilipp Reisner extern void drbd_rs_failed_io(struct drbd_conf *mdev,
1563b411b363SPhilipp Reisner 		sector_t sector, int size);
1564ea5442afSLars Ellenberg extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go);
1565b411b363SPhilipp Reisner extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
1566b411b363SPhilipp Reisner 		int size, const char *file, const unsigned int line);
1567b411b363SPhilipp Reisner #define drbd_set_in_sync(mdev, sector, size) \
1568b411b363SPhilipp Reisner 	__drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__)
156973a01a18SPhilipp Reisner extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
1570b411b363SPhilipp Reisner 		int size, const char *file, const unsigned int line);
1571b411b363SPhilipp Reisner #define drbd_set_out_of_sync(mdev, sector, size) \
1572b411b363SPhilipp Reisner 	__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
1573b411b363SPhilipp Reisner extern void drbd_al_shrink(struct drbd_conf *mdev);
1574b411b363SPhilipp Reisner 
1575b411b363SPhilipp Reisner /* drbd_nl.c */
15763b98c0c2SLars Ellenberg /* state info broadcast */
15773b98c0c2SLars Ellenberg struct sib_info {
15783b98c0c2SLars Ellenberg 	enum drbd_state_info_bcast_reason sib_reason;
15793b98c0c2SLars Ellenberg 	union {
15803b98c0c2SLars Ellenberg 		struct {
15813b98c0c2SLars Ellenberg 			char *helper_name;
15823b98c0c2SLars Ellenberg 			unsigned helper_exit_code;
15833b98c0c2SLars Ellenberg 		};
15843b98c0c2SLars Ellenberg 		struct {
15853b98c0c2SLars Ellenberg 			union drbd_state os;
15863b98c0c2SLars Ellenberg 			union drbd_state ns;
15873b98c0c2SLars Ellenberg 		};
15883b98c0c2SLars Ellenberg 	};
15893b98c0c2SLars Ellenberg };
15903b98c0c2SLars Ellenberg void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib);
1591b411b363SPhilipp Reisner 
1592b411b363SPhilipp Reisner /*
1593b411b363SPhilipp Reisner  * inline helper functions
1594b411b363SPhilipp Reisner  *************************/
1595b411b363SPhilipp Reisner 
/* see also page_chain_add and friends in drbd_receiver.c */

/* Pages are chained through their ->private field; follow one link. */
static inline struct page *page_chain_next(struct page *page)
{
	return (struct page *)page_private(page);
}
/* Walk a page chain; prefetches the next link while visiting the current
 * page.  The loop variable itself is advanced, so pass a scratch copy. */
#define page_chain_for_each(page) \
	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
			page = page_chain_next(page))
/* Like page_chain_for_each(), but safe against unlinking/freeing the
 * current page: @n caches the successor before the body runs. */
#define page_chain_for_each_safe(page, n) \
	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
160645bb912bSLars Ellenberg 
160745bb912bSLars Ellenberg 
1608045417f7SAndreas Gruenbacher static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
160945bb912bSLars Ellenberg {
1610db830c46SAndreas Gruenbacher 	struct page *page = peer_req->pages;
161145bb912bSLars Ellenberg 	page_chain_for_each(page) {
161245bb912bSLars Ellenberg 		if (page_count(page) > 1)
161345bb912bSLars Ellenberg 			return 1;
161445bb912bSLars Ellenberg 	}
161545bb912bSLars Ellenberg 	return 0;
161645bb912bSLars Ellenberg }
161745bb912bSLars Ellenberg 
/* Apply a state change under the global state read lock.
 * Multiple devices may change state concurrently; writers of
 * global_state_lock exclude all state changes at once. */
static inline enum drbd_state_rv
_drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
		enum chg_state_flags flags, struct completion *done)
{
	enum drbd_state_rv rv;

	read_lock(&global_state_lock);
	rv = __drbd_set_state(mdev, ns, flags, done);
	read_unlock(&global_state_lock);

	return rv;
}
1630b411b363SPhilipp Reisner 
/* Assemble the full state view of a device: the per-device state bits
 * plus the suspend flags, which live on the connection (tconn). */
static inline union drbd_state drbd_read_state(struct drbd_conf *mdev)
{
	union drbd_state rv;

	rv.i = mdev->state.i;
	rv.susp = mdev->tconn->susp;
	rv.susp_nod = mdev->tconn->susp_nod;
	rv.susp_fen = mdev->tconn->susp_fen;

	return rv;
}
164278bae59bSPhilipp Reisner 
#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
/* React to a local IO error according to the configured on-io-error
 * policy.  Caller holds the req_lock (see drbd_chk_io_error_()).
 * @forcedetach: detach regardless of policy (e.g. meta-data IO error). */
static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where)
{
	enum drbd_io_error_p ep;

	/* disk_conf is RCU-protected; we only need the policy value */
	rcu_read_lock();
	ep = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
	rcu_read_unlock();
	switch (ep) {
	case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
		if (!forcedetach) {
			if (__ratelimit(&drbd_ratelimit_state))
				dev_err(DEV, "Local IO failed in %s.\n", where);
			/* degrade to D_INCONSISTENT, but stay attached */
			if (mdev->state.disk > D_INCONSISTENT)
				_drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_HARD, NULL);
			break;
		}
		/* NOTE fall through to detach case if forcedetach set */
	case EP_DETACH:
	case EP_CALL_HELPER:
		set_bit(WAS_IO_ERROR, &mdev->flags);
		if (mdev->state.disk > D_FAILED) {
			/* D_FAILED triggers the actual detach via
			 * after_state_ch() */
			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
			dev_err(DEV,
				"Local IO failed in %s. Detaching...\n", where);
		}
		break;
	}
}
1672b411b363SPhilipp Reisner 
1673b411b363SPhilipp Reisner /**
1674b411b363SPhilipp Reisner  * drbd_chk_io_error: Handle the on_io_error setting, should be called from all io completion handlers
1675b411b363SPhilipp Reisner  * @mdev:	 DRBD device.
1676b411b363SPhilipp Reisner  * @error:	 Error code passed to the IO completion callback
1677b411b363SPhilipp Reisner  * @forcedetach: Force detach. I.e. the error happened while accessing the meta data
1678b411b363SPhilipp Reisner  *
1679b411b363SPhilipp Reisner  * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
1680b411b363SPhilipp Reisner  */
1681b411b363SPhilipp Reisner #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
1682b411b363SPhilipp Reisner static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
1683b411b363SPhilipp Reisner 	int error, int forcedetach, const char *where)
1684b411b363SPhilipp Reisner {
1685b411b363SPhilipp Reisner 	if (error) {
1686b411b363SPhilipp Reisner 		unsigned long flags;
168787eeee41SPhilipp Reisner 		spin_lock_irqsave(&mdev->tconn->req_lock, flags);
1688b411b363SPhilipp Reisner 		__drbd_chk_io_error_(mdev, forcedetach, where);
168987eeee41SPhilipp Reisner 		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
1690b411b363SPhilipp Reisner 	}
1691b411b363SPhilipp Reisner }
1692b411b363SPhilipp Reisner 
1693b411b363SPhilipp Reisner 
1694b411b363SPhilipp Reisner /**
1695b411b363SPhilipp Reisner  * drbd_md_first_sector() - Returns the first sector number of the meta data area
1696b411b363SPhilipp Reisner  * @bdev:	Meta data block device.
1697b411b363SPhilipp Reisner  *
1698b411b363SPhilipp Reisner  * BTW, for internal meta data, this happens to be the maximum capacity
1699b411b363SPhilipp Reisner  * we could agree upon with our peer node.
1700b411b363SPhilipp Reisner  */
1701daeda1ccSPhilipp Reisner static inline sector_t _drbd_md_first_sector(int meta_dev_idx, struct drbd_backing_dev *bdev)
1702b411b363SPhilipp Reisner {
1703daeda1ccSPhilipp Reisner 	switch (meta_dev_idx) {
1704b411b363SPhilipp Reisner 	case DRBD_MD_INDEX_INTERNAL:
1705b411b363SPhilipp Reisner 	case DRBD_MD_INDEX_FLEX_INT:
1706b411b363SPhilipp Reisner 		return bdev->md.md_offset + bdev->md.bm_offset;
1707b411b363SPhilipp Reisner 	case DRBD_MD_INDEX_FLEX_EXT:
1708b411b363SPhilipp Reisner 	default:
1709b411b363SPhilipp Reisner 		return bdev->md.md_offset;
1710b411b363SPhilipp Reisner 	}
1711b411b363SPhilipp Reisner }
1712b411b363SPhilipp Reisner 
1713daeda1ccSPhilipp Reisner static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
1714daeda1ccSPhilipp Reisner {
1715daeda1ccSPhilipp Reisner 	int meta_dev_idx;
1716daeda1ccSPhilipp Reisner 
1717daeda1ccSPhilipp Reisner 	rcu_read_lock();
1718daeda1ccSPhilipp Reisner 	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
1719daeda1ccSPhilipp Reisner 	rcu_read_unlock();
1720daeda1ccSPhilipp Reisner 
1721daeda1ccSPhilipp Reisner 	return _drbd_md_first_sector(meta_dev_idx, bdev);
1722daeda1ccSPhilipp Reisner }
1723daeda1ccSPhilipp Reisner 
/**
 * drbd_md_last_sector() - Return the last sector number of the meta data area
 * @bdev:	Meta data block device.
 *
 * For internal meta data the usable area ends just before the activity
 * log (MD_AL_OFFSET); for external meta data it ends at md_offset +
 * md_size_sect.
 */
static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
	int meta_dev_idx;

	/* meta_dev_idx lives in the RCU-protected disk_conf */
	rcu_read_lock();
	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
	rcu_read_unlock();

	switch (meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + MD_AL_OFFSET - 1;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset + bdev->md.md_size_sect;
	}
}
1745b411b363SPhilipp Reisner 
1746b411b363SPhilipp Reisner /* Returns the number of 512 byte sectors of the device */
1747b411b363SPhilipp Reisner static inline sector_t drbd_get_capacity(struct block_device *bdev)
1748b411b363SPhilipp Reisner {
1749b411b363SPhilipp Reisner 	/* return bdev ? get_capacity(bdev->bd_disk) : 0; */
175077304d2aSMike Snitzer 	return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
1751b411b363SPhilipp Reisner }
1752b411b363SPhilipp Reisner 
/**
 * drbd_get_max_capacity() - Returns the capacity we announce to out peer
 * @bdev:	Meta data block device.
 *
 * returns the capacity we announce to out peer.  we clip ourselves at the
 * various MAX_SECTORS, because if we don't, current implementation will
 * oops sooner or later
 */
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
	sector_t s;
	int meta_dev_idx;

	rcu_read_lock();
	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
	rcu_read_unlock();

	switch (meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* internal meta data: usable capacity ends where the meta
		 * data begins; 0 if the backing device reports no capacity */
		s = drbd_get_capacity(bdev->backing_bdev)
			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				_drbd_md_first_sector(meta_dev_idx, bdev))
			: 0;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_get_capacity(bdev->backing_bdev));
		/* clip at maximum size the meta device can support */
		s = min_t(sector_t, s,
			BM_EXT_TO_SECT(bdev->md.md_size_sect
				     - bdev->md.bm_offset));
		break;
	default:
		/* external, fixed-index meta data */
		s = min_t(sector_t, DRBD_MAX_SECTORS,
				drbd_get_capacity(bdev->backing_bdev));
	}
	return s;
}
1792b411b363SPhilipp Reisner 
/**
 * drbd_md_ss__() - Return the sector number of our meta data super block
 * @mdev:	DRBD device.
 * @bdev:	Meta data block device.
 *
 * Returns 0 on error (no backing device) and for flex-external meta data,
 * where the superblock sits at the start of the meta device.
 */
static inline sector_t drbd_md_ss__(struct drbd_conf *mdev,
				    struct drbd_backing_dev *bdev)
{
	int meta_dev_idx;

	rcu_read_lock();
	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
	rcu_read_unlock();

	switch (meta_dev_idx) {
	default: /* external, some index */
		return MD_RESERVED_SECT * meta_dev_idx;
	case DRBD_MD_INDEX_INTERNAL:
		/* with drbd08, internal meta data is always "flexible" */
	case DRBD_MD_INDEX_FLEX_INT:
		/* sizeof(struct md_on_disk_07) == 4k
		 * position: last 4k aligned block of 4k size */
		if (!bdev->backing_bdev) {
			if (__ratelimit(&drbd_ratelimit_state)) {
				dev_err(DEV, "bdev->backing_bdev==NULL\n");
				dump_stack();
			}
			return 0;
		}
		/* & ~7ULL rounds the capacity down to a 4k boundary
		 * (8 sectors of 512 bytes) */
		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL)
			- MD_AL_OFFSET;
	case DRBD_MD_INDEX_FLEX_EXT:
		return 0;
	}
}
1828b411b363SPhilipp Reisner 
/* Queue @w at the FRONT of @q (processed next) and wake the worker.
 * The wake_up is done after dropping the lock; the waiter re-checks the
 * list under the lock anyway. */
static inline void
drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}
1838b411b363SPhilipp Reisner 
/* Queue @w at the TAIL of @q (normal FIFO order) and wake the worker. */
static inline void
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}
1848b411b363SPhilipp Reisner 
18490625ac19SPhilipp Reisner static inline void wake_asender(struct drbd_tconn *tconn)
1850b411b363SPhilipp Reisner {
18510625ac19SPhilipp Reisner 	if (test_bit(SIGNAL_ASENDER, &tconn->flags))
18520625ac19SPhilipp Reisner 		force_sig(DRBD_SIG, tconn->asender.task);
1853b411b363SPhilipp Reisner }
1854b411b363SPhilipp Reisner 
/* Ask the asender to send a P_PING.  The SEND_PING flag must be set
 * before waking, so the asender cannot miss the request. */
static inline void request_ping(struct drbd_tconn *tconn)
{
	set_bit(SEND_PING, &tconn->flags);
	wake_asender(tconn);
}
1860b411b363SPhilipp Reisner 
1861dba58587SAndreas Gruenbacher extern void *conn_prepare_command(struct drbd_tconn *, struct drbd_socket *);
1862dba58587SAndreas Gruenbacher extern void *drbd_prepare_command(struct drbd_conf *, struct drbd_socket *);
1863dba58587SAndreas Gruenbacher extern int conn_send_command(struct drbd_tconn *, struct drbd_socket *,
1864dba58587SAndreas Gruenbacher 			     enum drbd_packet, unsigned int, void *,
1865dba58587SAndreas Gruenbacher 			     unsigned int);
1866dba58587SAndreas Gruenbacher extern int drbd_send_command(struct drbd_conf *, struct drbd_socket *,
1867dba58587SAndreas Gruenbacher 			     enum drbd_packet, unsigned int, void *,
1868dba58587SAndreas Gruenbacher 			     unsigned int);
1869dba58587SAndreas Gruenbacher 
1870e307f352SAndreas Gruenbacher extern int drbd_send_ping(struct drbd_tconn *tconn);
1871e307f352SAndreas Gruenbacher extern int drbd_send_ping_ack(struct drbd_tconn *tconn);
18729f5bdc33SAndreas Gruenbacher extern int drbd_send_state_req(struct drbd_conf *, union drbd_state, union drbd_state);
18739f5bdc33SAndreas Gruenbacher extern int conn_send_state_req(struct drbd_tconn *, union drbd_state, union drbd_state);
1874cf29c9d8SPhilipp Reisner 
/* Convenience wrappers around _drbd_thread_stop().
 * NOTE(review): the two bool arguments appear to be (restart, wait) —
 * confirm against the _drbd_thread_stop() definition. */

/* Stop @thi and wait for it to exit. */
static inline void drbd_thread_stop(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, true);
}

/* Request @thi to stop, do not wait. */
static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, false);
}

/* Request @thi to restart, do not wait. */
static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, true, false);
}
1889b411b363SPhilipp Reisner 
1890b411b363SPhilipp Reisner /* counts how many answer packets packets we expect from our peer,
1891b411b363SPhilipp Reisner  * for either explicit application requests,
1892b411b363SPhilipp Reisner  * or implicit barrier packets as necessary.
1893b411b363SPhilipp Reisner  * increased:
1894b411b363SPhilipp Reisner  *  w_send_barrier
18958554df1cSAndreas Gruenbacher  *  _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
1896b411b363SPhilipp Reisner  *    it is much easier and equally valid to count what we queue for the
1897b411b363SPhilipp Reisner  *    worker, even before it actually was queued or send.
1898b411b363SPhilipp Reisner  *    (drbd_make_request_common; recovery path on read io-error)
1899b411b363SPhilipp Reisner  * decreased:
1900b411b363SPhilipp Reisner  *  got_BarrierAck (respective tl_clear, tl_clear_barrier)
19018554df1cSAndreas Gruenbacher  *  _req_mod(req, DATA_RECEIVED)
1902b411b363SPhilipp Reisner  *     [from receive_DataReply]
19038554df1cSAndreas Gruenbacher  *  _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
1904b411b363SPhilipp Reisner  *     [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
1905b411b363SPhilipp Reisner  *     for some reason it is NOT decreased in got_NegAck,
1906b411b363SPhilipp Reisner  *     but in the resulting cleanup code from report_params.
1907b411b363SPhilipp Reisner  *     we should try to remember the reason for that...
19088554df1cSAndreas Gruenbacher  *  _req_mod(req, SEND_FAILED or SEND_CANCELED)
19098554df1cSAndreas Gruenbacher  *  _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
1910b411b363SPhilipp Reisner  *     [from tl_clear_barrier]
1911b411b363SPhilipp Reisner  */
/* One more answer packet is now expected from the peer (see the
 * increase/decrease inventory in the comment block above). */
static inline void inc_ap_pending(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->ap_pending_cnt);
}
1916b411b363SPhilipp Reisner 
/*
 * Complain if the given atomic counter of @mdev went negative, naming the
 * calling function and line.
 *
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and is safe in unbraced if/else bodies (no dangling-else surprise).
 * The counter is read exactly once into a local, so the value tested and
 * the value printed cannot differ under concurrent updates.
 */
#define ERR_IF_CNT_IS_NEGATIVE(which, func, line)			\
	do {								\
		int _cnt = atomic_read(&mdev->which);			\
		if (_cnt < 0)						\
			dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n", \
				func, line, _cnt);			\
	} while (0)
1922b411b363SPhilipp Reisner 
192349559d87SPhilipp Reisner #define dec_ap_pending(mdev) _dec_ap_pending(mdev, __FUNCTION__, __LINE__)
192449559d87SPhilipp Reisner static inline void _dec_ap_pending(struct drbd_conf *mdev, const char *func, int line)
192549559d87SPhilipp Reisner {
192649559d87SPhilipp Reisner 	if (atomic_dec_and_test(&mdev->ap_pending_cnt))
192749559d87SPhilipp Reisner 		wake_up(&mdev->misc_wait);
192849559d87SPhilipp Reisner 	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
192949559d87SPhilipp Reisner }
1930b411b363SPhilipp Reisner 
1931b411b363SPhilipp Reisner /* counts how many resync-related answers we still expect from the peer
1932b411b363SPhilipp Reisner  *		     increase			decrease
1933b411b363SPhilipp Reisner  * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
193425985edcSLucas De Marchi  * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
1935b411b363SPhilipp Reisner  *					   (or P_NEG_ACK with ID_SYNCER)
1936b411b363SPhilipp Reisner  */
/* One more resync-related answer is now expected from the peer. */
static inline void inc_rs_pending(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->rs_pending_cnt);
}
1941b411b363SPhilipp Reisner 
194249559d87SPhilipp Reisner #define dec_rs_pending(mdev) _dec_rs_pending(mdev, __FUNCTION__, __LINE__)
194349559d87SPhilipp Reisner static inline void _dec_rs_pending(struct drbd_conf *mdev, const char *func, int line)
194449559d87SPhilipp Reisner {
194549559d87SPhilipp Reisner 	atomic_dec(&mdev->rs_pending_cnt);
194649559d87SPhilipp Reisner 	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
194749559d87SPhilipp Reisner }
1948b411b363SPhilipp Reisner 
1949b411b363SPhilipp Reisner /* counts how many answers we still need to send to the peer.
1950b411b363SPhilipp Reisner  * increased on
1951b411b363SPhilipp Reisner  *  receive_Data	unless protocol A;
1952b411b363SPhilipp Reisner  *			we need to send a P_RECV_ACK (proto B)
1953b411b363SPhilipp Reisner  *			or P_WRITE_ACK (proto C)
1954b411b363SPhilipp Reisner  *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
1955b411b363SPhilipp Reisner  *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
1956b411b363SPhilipp Reisner  *  receive_Barrier_*	we need to send a P_BARRIER_ACK
1957b411b363SPhilipp Reisner  */
/* We owe the peer one more answer (see the inventory comment above). */
static inline void inc_unacked(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->unacked_cnt);
}
1962b411b363SPhilipp Reisner 
196349559d87SPhilipp Reisner #define dec_unacked(mdev) _dec_unacked(mdev, __FUNCTION__, __LINE__)
196449559d87SPhilipp Reisner static inline void _dec_unacked(struct drbd_conf *mdev, const char *func, int line)
196549559d87SPhilipp Reisner {
196649559d87SPhilipp Reisner 	atomic_dec(&mdev->unacked_cnt);
196749559d87SPhilipp Reisner 	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
196849559d87SPhilipp Reisner }
1969b411b363SPhilipp Reisner 
197049559d87SPhilipp Reisner #define sub_unacked(mdev, n) _sub_unacked(mdev, n, __FUNCTION__, __LINE__)
197149559d87SPhilipp Reisner static inline void _sub_unacked(struct drbd_conf *mdev, int n, const char *func, int line)
197249559d87SPhilipp Reisner {
197349559d87SPhilipp Reisner 	atomic_sub(n, &mdev->unacked_cnt);
197449559d87SPhilipp Reisner 	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
197549559d87SPhilipp Reisner }
1976b411b363SPhilipp Reisner 
1977b411b363SPhilipp Reisner /**
1978b411b363SPhilipp Reisner  * get_ldev() - Increase the ref count on mdev->ldev. Returns 0 if there is no ldev
1979b411b363SPhilipp Reisner  * @M:		DRBD device.
1980b411b363SPhilipp Reisner  *
1981b411b363SPhilipp Reisner  * You have to call put_ldev() when finished working with mdev->ldev.
1982b411b363SPhilipp Reisner  */
1983b411b363SPhilipp Reisner #define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT))
1984b411b363SPhilipp Reisner #define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS))
1985b411b363SPhilipp Reisner 
/* Drop one local-disk reference (counterpart of get_ldev()).
 * When the last reference goes away, trigger the appropriate stage of
 * detach: destroy the ldev if already D_DISKLESS, or start going
 * diskless if D_FAILED. */
static inline void put_ldev(struct drbd_conf *mdev)
{
	int i = atomic_dec_return(&mdev->local_cnt);

	/* This may be called from some endio handler,
	 * so we must not sleep here. */

	__release(local);	/* sparse annotation, pairs with __cond_lock */
	D_ASSERT(i >= 0);
	if (i == 0) {
		if (mdev->state.disk == D_DISKLESS)
			/* even internal references gone, safe to destroy */
			drbd_ldev_destroy(mdev);
		if (mdev->state.disk == D_FAILED)
			/* all application IO references gone. */
			drbd_go_diskless(mdev);
		wake_up(&mdev->misc_wait);
	}
}
2005b411b363SPhilipp Reisner 
2006b411b363SPhilipp Reisner #ifndef __CHECKER__
/* Try to get a local-disk reference, but only if the disk state is at
 * least @mins.  Returns non-zero iff the reference was taken.
 * Increment-then-check: the counter is bumped first so a concurrent
 * state change cannot race us past the check; on failure the reference
 * is dropped again via put_ldev(). */
static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	/* never get a reference while D_DISKLESS */
	if (mdev->state.disk == D_DISKLESS)
		return 0;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed)
		put_ldev(mdev);
	return io_allowed;
}
2021b411b363SPhilipp Reisner #else
2022b411b363SPhilipp Reisner extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins);
2023b411b363SPhilipp Reisner #endif
2024b411b363SPhilipp Reisner 
/* you must have an "get_ldev" reference */
/* Report resync/verify progress: bits still to go and permille done.
 * All arithmetic is carefully kept within unsigned long so no 64bit
 * division is needed on 32bit architectures. */
static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
		unsigned long *bits_left, unsigned int *per_mil_done)
{
	/* this is to break it at compile time when we change that, in case we
	 * want to support more than (1<<32) bits on a 32bit arch. */
	typecheck(unsigned long, mdev->rs_total);

	/* note: both rs_total and rs_left are in bits, i.e. in
	 * units of BM_BLOCK_SIZE.
	 * for the percentage, we don't care. */

	/* online verify tracks its own "left" counter */
	if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
		*bits_left = mdev->ov_left;
	else
		*bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
	/* >> 10 to prevent overflow,
	 * +1 to prevent division by zero */
	if (*bits_left > mdev->rs_total) {
		/* doh. maybe a logic bug somewhere.
		 * may also be just a race condition
		 * between this and a disconnect during sync.
		 * for now, just prevent in-kernel buffer overflow.
		 */
		smp_rmb();
		dev_warn(DEV, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
				drbd_conn_str(mdev->state.conn),
				*bits_left, mdev->rs_total, mdev->rs_failed);
		*per_mil_done = 0;
	} else {
		/* Make sure the division happens in long context.
		 * We allow up to one petabyte storage right now,
		 * at a granularity of 4k per bit that is 2**38 bits.
		 * After shift right and multiplication by 1000,
		 * this should still fit easily into a 32bit long,
		 * so we don't need a 64bit division on 32bit arch.
		 * Note: currently we don't support such large bitmaps on 32bit
		 * arch anyways, but no harm done to be prepared for it here.
		 */
		unsigned int shift = mdev->rs_total > UINT_MAX ? 16 : 10;
		unsigned long left = *bits_left >> shift;
		unsigned long total = 1UL + (mdev->rs_total >> shift);
		unsigned long tmp = 1000UL - left * 1000UL/total;
		*per_mil_done = tmp;
	}
}
2071b411b363SPhilipp Reisner 
2072b411b363SPhilipp Reisner 
2073b411b363SPhilipp Reisner /* this throttles on-the-fly application requests
2074b411b363SPhilipp Reisner  * according to max_buffers settings;
2075b411b363SPhilipp Reisner  * maybe re-implement using semaphores? */
2076b411b363SPhilipp Reisner static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
2077b411b363SPhilipp Reisner {
207844ed167dSPhilipp Reisner 	struct net_conf *nc;
207944ed167dSPhilipp Reisner 	int mxb;
208044ed167dSPhilipp Reisner 
208144ed167dSPhilipp Reisner 	rcu_read_lock();
208244ed167dSPhilipp Reisner 	nc = rcu_dereference(mdev->tconn->net_conf);
208344ed167dSPhilipp Reisner 	mxb = nc ? nc->max_buffers : 1000000;  /* arbitrary limit on open requests */
208444ed167dSPhilipp Reisner 	rcu_read_unlock();
208544ed167dSPhilipp Reisner 
2086b411b363SPhilipp Reisner 	return mxb;
2087b411b363SPhilipp Reisner }
2088b411b363SPhilipp Reisner 
/*
 * drbd_state_is_stable() - may new application I/O start in the current state?
 *
 * Returns 1 when both the connection state and the disk state are "stable",
 * i.e. no transition is in progress that new application I/O could race with;
 * returns 0 otherwise.  Used by may_inc_ap_bio().
 */
static inline int drbd_state_is_stable(struct drbd_conf *mdev)
{
	union drbd_dev_state s = mdev->state;

	/* DO NOT add a default clause, we want the compiler to warn us
	 * for any newly introduced state we may have forgotten to add here */

	switch ((enum drbd_conns)s.conn) {
	/* new io only accepted when there is no connection, ... */
	case C_STANDALONE:
	case C_WF_CONNECTION:
	/* ... or there is a well established connection. */
	case C_CONNECTED:
	case C_SYNC_SOURCE:
	case C_SYNC_TARGET:
	case C_VERIFY_S:
	case C_VERIFY_T:
	case C_PAUSED_SYNC_S:
	case C_PAUSED_SYNC_T:
	case C_AHEAD:
	case C_BEHIND:
		/* transitional states, IO allowed */
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_REPORT_PARAMS:
	case C_STARTING_SYNC_S:
	case C_STARTING_SYNC_T:
		break;

		/* Allow IO in BM exchange states with new protocols */
	case C_WF_BITMAP_S:
		if (mdev->tconn->agreed_pro_version < 96)
			return 0;
		break;

		/* no new io accepted in these states */
	case C_WF_BITMAP_T:
	case C_WF_SYNC_UUID:
	case C_MASK:
		/* not "stable" */
		return 0;
	}

	/* connection state is stable; now also require a stable disk state */
	switch ((enum drbd_disk_state)s.disk) {
	case D_DISKLESS:
	case D_INCONSISTENT:
	case D_OUTDATED:
	case D_CONSISTENT:
	case D_UP_TO_DATE:
	case D_FAILED:
		/* disk state is stable as well. */
		break;

	/* no new io accepted during transitional states */
	case D_ATTACHING:
	case D_NEGOTIATING:
	case D_UNKNOWN:
	case D_MASK:
		/* not "stable" */
		return 0;
	}

	return 1;
}
2158b411b363SPhilipp Reisner 
21592aebfabbSPhilipp Reisner static inline int drbd_suspended(struct drbd_conf *mdev)
2160fb22c402SPhilipp Reisner {
21618e0af25fSPhilipp Reisner 	struct drbd_tconn *tconn = mdev->tconn;
21628e0af25fSPhilipp Reisner 
21638e0af25fSPhilipp Reisner 	return tconn->susp || tconn->susp_fen || tconn->susp_nod;
2164fb22c402SPhilipp Reisner }
2165fb22c402SPhilipp Reisner 
21661b881ef7SAndreas Gruenbacher static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
2167b411b363SPhilipp Reisner {
2168b411b363SPhilipp Reisner 	int mxb = drbd_get_max_buffers(mdev);
2169b411b363SPhilipp Reisner 
21702aebfabbSPhilipp Reisner 	if (drbd_suspended(mdev))
21711b881ef7SAndreas Gruenbacher 		return false;
2172b411b363SPhilipp Reisner 	if (test_bit(SUSPEND_IO, &mdev->flags))
21731b881ef7SAndreas Gruenbacher 		return false;
2174b411b363SPhilipp Reisner 
2175b411b363SPhilipp Reisner 	/* to avoid potential deadlock or bitmap corruption,
2176b411b363SPhilipp Reisner 	 * in various places, we only allow new application io
2177b411b363SPhilipp Reisner 	 * to start during "stable" states. */
2178b411b363SPhilipp Reisner 
2179b411b363SPhilipp Reisner 	/* no new io accepted when attaching or detaching the disk */
21803719094eSPhilipp Reisner 	if (!drbd_state_is_stable(mdev))
21811b881ef7SAndreas Gruenbacher 		return false;
2182b411b363SPhilipp Reisner 
2183b411b363SPhilipp Reisner 	/* since some older kernels don't have atomic_add_unless,
2184b411b363SPhilipp Reisner 	 * and we are within the spinlock anyways, we have this workaround.  */
2185b411b363SPhilipp Reisner 	if (atomic_read(&mdev->ap_bio_cnt) > mxb)
21861b881ef7SAndreas Gruenbacher 		return false;
2187b411b363SPhilipp Reisner 	if (test_bit(BITMAP_IO, &mdev->flags))
21881b881ef7SAndreas Gruenbacher 		return false;
21891b881ef7SAndreas Gruenbacher 	return true;
2190b411b363SPhilipp Reisner }
2191b411b363SPhilipp Reisner 
219223361cf3SLars Ellenberg static inline bool inc_ap_bio_cond(struct drbd_conf *mdev)
21938869d683SPhilipp Reisner {
21941b881ef7SAndreas Gruenbacher 	bool rv = false;
21958869d683SPhilipp Reisner 
219687eeee41SPhilipp Reisner 	spin_lock_irq(&mdev->tconn->req_lock);
21971b881ef7SAndreas Gruenbacher 	rv = may_inc_ap_bio(mdev);
21988869d683SPhilipp Reisner 	if (rv)
219923361cf3SLars Ellenberg 		atomic_inc(&mdev->ap_bio_cnt);
220087eeee41SPhilipp Reisner 	spin_unlock_irq(&mdev->tconn->req_lock);
22018869d683SPhilipp Reisner 
22028869d683SPhilipp Reisner 	return rv;
22038869d683SPhilipp Reisner }
22048869d683SPhilipp Reisner 
/* take an application-bio reference, sleeping until that is allowed */
static inline void inc_ap_bio(struct drbd_conf *mdev)
{
	/* we wait here
	 *    as long as the device is suspended
	 *    until the bitmap is no longer on the fly during connection
	 *    handshake as long as we would exceed the max_buffer limit.
	 *
	 * to avoid races with the reconnect code,
	 * we need to atomic_inc within the spinlock. */

	/* inc_ap_bio_cond() both checks and increments under req_lock;
	 * dec_ap_bio() wakes misc_wait when the count drops */
	wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev));
}
2217b411b363SPhilipp Reisner 
2218b411b363SPhilipp Reisner static inline void dec_ap_bio(struct drbd_conf *mdev)
2219b411b363SPhilipp Reisner {
2220b411b363SPhilipp Reisner 	int mxb = drbd_get_max_buffers(mdev);
2221b411b363SPhilipp Reisner 	int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);
2222b411b363SPhilipp Reisner 
2223b411b363SPhilipp Reisner 	D_ASSERT(ap_bio >= 0);
2224b411b363SPhilipp Reisner 	/* this currently does wake_up for every dec_ap_bio!
2225b411b363SPhilipp Reisner 	 * maybe rather introduce some type of hysteresis?
2226b411b363SPhilipp Reisner 	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
2227b411b363SPhilipp Reisner 	if (ap_bio < mxb)
2228b411b363SPhilipp Reisner 		wake_up(&mdev->misc_wait);
2229b411b363SPhilipp Reisner 	if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
2230b411b363SPhilipp Reisner 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
2231*d5b27b01SLars Ellenberg 			drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
2232b411b363SPhilipp Reisner 	}
2233b411b363SPhilipp Reisner }
2234b411b363SPhilipp Reisner 
223562b0da3aSLars Ellenberg static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
2236b411b363SPhilipp Reisner {
223762b0da3aSLars Ellenberg 	int changed = mdev->ed_uuid != val;
2238b411b363SPhilipp Reisner 	mdev->ed_uuid = val;
223962b0da3aSLars Ellenberg 	return changed;
2240b411b363SPhilipp Reisner }
2241b411b363SPhilipp Reisner 
/* report the queue ordering mode for this device; always "none" */
static inline int drbd_queue_order_type(struct drbd_conf *mdev)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ stuff */
#ifndef QUEUE_ORDERED_NONE
	/* compat shim for kernels that no longer define this constant */
#define QUEUE_ORDERED_NONE 0
#endif
	return QUEUE_ORDERED_NONE;
}
2251b411b363SPhilipp Reisner 
2252b411b363SPhilipp Reisner static inline void drbd_md_flush(struct drbd_conf *mdev)
2253b411b363SPhilipp Reisner {
2254b411b363SPhilipp Reisner 	int r;
2255b411b363SPhilipp Reisner 
2256a8a4e51eSPhilipp Reisner 	if (test_bit(MD_NO_FUA, &mdev->flags))
2257b411b363SPhilipp Reisner 		return;
2258b411b363SPhilipp Reisner 
2259dd3932edSChristoph Hellwig 	r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
2260b411b363SPhilipp Reisner 	if (r) {
2261a8a4e51eSPhilipp Reisner 		set_bit(MD_NO_FUA, &mdev->flags);
2262b411b363SPhilipp Reisner 		dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
2263b411b363SPhilipp Reisner 	}
2264b411b363SPhilipp Reisner }
2265b411b363SPhilipp Reisner 
2266b411b363SPhilipp Reisner #endif
2267