/* SPDX-License-Identifier: GPL-2.0-only */
/*
  drbd_int.h

  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.


*/

#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <crypto/hash.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/idr.h>
#include <linux/dynamic_debug.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include <linux/drbd_config.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"
#include "drbd_polymorph_printk.h"

/* shared module parameters, defined in drbd_main.c */
#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int drbd_enable_faults;
extern int drbd_fault_rate;
#endif

extern unsigned int drbd_minor_count;
extern char drbd_usermode_helper[];
extern int drbd_proc_details;


/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes.
 * I choose SIGHUP for now.
 */
#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;
struct drbd_peer_device;

/* Defines to control fault insertion */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/*           read  */
	DRBD_FAULT_RS_WR = 2,	/* resync          */
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,	/* data            */
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
	DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return drbd_fault_rate &&
		(drbd_enable_faults & (1<<type)) &&
		_drbd_insert_fault(device, type);
#else
	return 0;
#endif
}
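/*
 * Hedged usage sketch, not from the original source: a submit path can
 * gate an artificial I/O failure on the matching fault type.  The bio
 * handling shown here is an assumption for illustration only:
 *
 *	if (drbd_insert_fault(device, DRBD_FAULT_DT_WR)) {
 *		bio_io_error(bio);	// fake a failed data write
 *		return;
 *	}
 *	submit_bio(bio);
 */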

/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))
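/*
 * For example: div_ceil(7, 4) == 2 while div_floor(7, 4) == 1;
 * for exact multiples both agree, e.g. div_ceil(8, 4) == div_floor(8, 4) == 2.
 */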

extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices; /* RCU, updates: genl_lock() */
extern struct list_head drbd_resources; /* RCU, updates: genl_lock() */

extern const char *cmdname(enum drbd_packet cmd);

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
	/* "const"
	 * stores total bits and long words
	 * of the bitmap, so we don't need to
	 * call the accessor functions over and again. */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* during xfer, current position within the bitmap */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index: (h->command == P_BITMAP) */
	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_peer_device *peer_device,
			       const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" (32 or 64 bit),
	 * aligned at 64 bit.
	 * Encoded packet may end at an unaligned bit offset.
	 * In case a fallback clear text packet is transmitted in
	 * between, we adjust this offset back to the last 64bit
	 * aligned "native long word", which makes coding and decoding
	 * the plain text bitmap much more convenient.  */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
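/*
 * Worked example of the alignment above: with bit_offset == 100,
 * a 64-bit build computes word_offset = 100 >> 6 = 1 (one 64-bit word).
 * A 32-bit build computes 100 >> 5 = 3, then clears the low bit to get 2,
 * i.e. the 32-bit word index is rounded down to the last 64-bit boundary.
 */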

extern unsigned int drbd_header_size(struct drbd_connection *connection);

/**********************************************************************/
enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	int reset_cpu_mask;
	const char *name;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 *	--lge */

	smp_rmb();
	return thi->t_state;
}

struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
};

struct drbd_device_work {
	struct drbd_work w;
	struct drbd_device *device;
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

extern void lock_all_resources(void);
extern void unlock_all_resources(void);

struct drbd_request {
	struct drbd_work w;
	struct drbd_device *device;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error).
	 * see drbd_request_endio(). */
	struct bio *private_bio;

	struct drbd_interval i;

	/* epoch: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * causing a p_barrier packet to be sent, starting a new epoch.
	 *
	 * This corresponds to "barrier" in struct p_barrier[_ack],
	 * and to "barrier_nr" in struct drbd_epoch (and various
	 * comments/function parameters/local variable names).
	 */
	unsigned int epoch;

	struct list_head tl_requests; /* ring list in the transfer log */
	struct bio *master_bio;       /* master bio pointer */

	/* see struct drbd_device */
	struct list_head req_pending_master_completion;
	struct list_head req_pending_local;

	/* for generic IO accounting */
	unsigned long start_jif;

	/* for DRBD internal statistics */

	/* Minimal set of time stamps to determine if we wait for activity log
	 * transactions, local disk or peer.  32 bit "jiffies" are good enough,
	 * we don't expect a DRBD request to be stalled for several months.
	 */

	/* before actual request processing */
	unsigned long in_actlog_jif;

	/* local disk */
	unsigned long pre_submit_jif;

	/* per connection */
	unsigned long pre_send_jif;
	unsigned long acked_jif;
	unsigned long net_done_jif;

	/* Possibly even more detail to track each phase:
	 *  master_completion_jif
	 *      how long did it take to complete the master bio
	 *      (application visible latency)
	 *  allocated_jif
	 *      how long the master bio was blocked until we finally allocated
	 *      a tracking struct
	 *  in_actlog_jif
	 *      how long did we wait for activity log transactions
	 *
	 *  net_queued_jif
	 *      when did we finally queue it for sending
	 *  pre_send_jif
	 *      when did we start sending it
	 *  post_send_jif
	 *      how long did we block in the network stack trying to send it
	 *  acked_jif
	 *      when did we receive (or fake, in protocol A) a remote ACK
	 *  net_done_jif
	 *      when did we receive final acknowledgement (P_BARRIER_ACK),
	 *      or decide, e.g. on connection loss, that we do no longer expect
	 *      anything from this peer for this request.
	 *
	 *  pre_submit_jif
	 *  post_sub_jif
	 *      when did we start submitting to the lower level device,
	 *      and how long did we block in that submit function
	 *  local_completion_jif
	 *      how long did it take the lower level device to complete this request
	 */


	/* once it hits 0, we may complete the master_bio */
	atomic_t completion_ref;
	/* once it hits 0, we may destroy this drbd_request object */
	struct kref kref;

	unsigned rq_state; /* see comments above _req_mod() */
};
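/*
 * Hedged sketch, not from the original source, of the two-counter
 * lifetime rule above.  drbd_req_destroy() is the real kref release
 * function in drbd_req.c; the completion helper name is an assumption:
 *
 *	if (atomic_dec_and_test(&req->completion_ref))
 *		complete_master_bio_somehow(req);	// hypothetical helper
 *	kref_put(&req->kref, drbd_req_destroy);
 */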

struct drbd_epoch {
	struct drbd_connection *connection;
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size; /* increased on every request added. */
	atomic_t active;     /* increased on every req. added, and dec on every finished. */
	unsigned long flags;
};

/* drbd_epoch flag bits */
enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32, /* used as flag */
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	struct drbd_epoch *epoch; /* for writes */
	struct page *pages;
	blk_opf_t opf;
	atomic_t pending_bios;
	struct drbd_interval i;
	/* see comments on ee flag bits below */
	unsigned long flags;
	unsigned long submit_jif;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

/* Equivalent to bio_op and req_op. */
#define peer_req_op(peer_req) \
	((peer_req)->opf & REQ_OP_MASK)

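/*
 * Hedged usage sketch, not from the original source: receiver-side code
 * can branch on the request operation the same way req_op() is used on
 * a struct request; the handler name is an assumption:
 *
 *	if (peer_req_op(peer_req) == REQ_OP_WRITE)
 *		handle_peer_write(peer_req);	// hypothetical helper
 */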
/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	/* is this a TRIM aka REQ_OP_DISCARD? */
	__EE_TRIM,
	/* explicit zero-out requested, or
	 * our lower level cannot handle trim,
	 * and we want to fall back to zeroout instead */
	__EE_ZEROOUT,

	/* In case a barrier failed,
	 * we need to resubmit without the barrier flag. */
	__EE_RESUBMITTED,

	/* we may have several bios per peer request.
	 * if any of those fail, we set this flag atomically
	 * from the endio callback */
	__EE_WAS_ERROR,

	/* This ee has a pointer to a digest instead of a block id */
	__EE_HAS_DIGEST,

	/* Conflicting local requests need to be restarted after this request */
	__EE_RESTART_REQUESTS,

	/* The peer wants a write ACK for this (wire proto C) */
	__EE_SEND_WRITE_ACK,

	/* Is set when net_conf had two_primaries set while creating this peer_req */
	__EE_IN_INTERVAL_TREE,

	/* for debugfs: */
	/* has this been submitted, or does it still wait for something else? */
	__EE_SUBMITTED,

	/* this is/was a write request */
	__EE_WRITE,

	/* this is/was a write same request */
	__EE_WRITE_SAME,

	/* this originates from application on peer
	 * (not some resync or verify or other DRBD internal request) */
	__EE_APPLICATION,

	/* If it contains only 0 bytes, send back P_RS_DEALLOCATED */
	__EE_RS_THIN_REQ,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
#define EE_TRIM                (1<<__EE_TRIM)
#define EE_ZEROOUT             (1<<__EE_ZEROOUT)
#define EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS	(1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK	(1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE	(1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED		(1<<__EE_SUBMITTED)
#define EE_WRITE		(1<<__EE_WRITE)
#define EE_WRITE_SAME		(1<<__EE_WRITE_SAME)
#define EE_APPLICATION		(1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ		(1<<__EE_RS_THIN_REQ)

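/*
 * Hedged sketch, not from the original source, of the locking rule in
 * the comment above: while bios are in flight, only the atomic set_bit
 * form may be used; the mask form is fine for plain tests afterwards:
 *
 *	set_bit(__EE_WAS_ERROR, &peer_req->flags);	// from endio, atomic
 *
 *	if (peer_req->flags & EE_WAS_ERROR)		// later, plain test
 *		...
 */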
/* flag bits per device */
enum {
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	CONSIDER_RESYNC,

	MD_NO_FUA,		/* User wants us to not use FUA/FLUSH on meta data dev */

	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,       /* Started bitmap IO */
	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
	WAS_READ_ERROR,		/* Local disk READ failed (set additionally to the above) */
	FORCE_DETACH,		/* Force-detach from local disk, aborting any pending local IO */
	RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
	AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
	B_RS_H_DONE,		/* Before resync handler done (already executed) */
	DISCARD_MY_DATA,	/* discard_my_data flag per volume */
	READ_BALANCE_RR,

	FLUSH_PENDING,		/* if set, device->flush_jif is when we submitted that flush
				 * from drbd_flush_after_epoch() */

	/* cleared only after backing device related structures have been destroyed. */
	GOING_DISKLESS,		/* Disk is being detached, because of io-error, or admin request. */

	/* to be used in drbd_device_post_work() */
	GO_DISKLESS,		/* tell worker to schedule cleanup before detach */
	DESTROY_DISK,		/* tell worker to close backing devices and destroy related structures. */
	MD_SYNC,		/* tell worker to call drbd_md_sync() */
	RS_START,		/* tell worker to start resync/OV */
	RS_PROGRESS,		/* tell worker that resync made significant progress */
	RS_DONE,		/* tell worker that resync is done */
};

struct drbd_bitmap; /* opaque for drbd_device */

/* definition of bits in bm_flags to be used in drbd_bm_lock
 * and drbd_bitmap_io and friends. */
enum bm_flag {
	/* currently locked for bulk operation */
	BM_LOCKED_MASK = 0xf,

	/* in detail, that is: */
	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET   = 0x2,
	BM_DONT_TEST  = 0x4,

	/* so we can mark it locked for bulk operation,
	 * and still allow all non-bulk operations */
	BM_IS_LOCKED  = 0x8,

	/* (test bit, count bit) allowed (common case) */
	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	/* testing bits, as well as setting new bits allowed, but clearing bits
	 * would be unexpected.  Used during bitmap receive.  Setting new bits
	 * requires sending of "out-of-sync" information, though. */
	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	/* for drbd_bm_write_copy_pages, everything is allowed,
	 * only concurrent bulk operations are locked out. */
	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};

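/*
 * Hedged usage sketch, not from the original source: the intended
 * pairing is a bulk-operation lock with the strictest mask that still
 * permits what the bulk path itself must do, e.g. while receiving the
 * bitmap only setting bits is tolerated:
 *
 *	drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED);
 *	... decode incoming packets, setting bits as needed ...
 *	drbd_bm_unlock(device);
 */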
struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;  /* to protect the list. */
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	struct socket    *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	void *sbuf;
	void *rbuf;
};

struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	spinlock_t uuid_lock;
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;	/* signed relative sector offset to activity log */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* cached value of bdev->disk_conf->meta_dev_idx (see below) */
	s32 meta_dev_idx;

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k; /* cached product of the above */
};

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct file *backing_bdev_file;
	struct block_device *md_bdev;
	struct file *f_md_bdev;
	struct drbd_md md;
	struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
	sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
	struct page *page;
	unsigned long start_jif;	/* last call to drbd_md_get_buffer */
	unsigned long submit_jif;	/* last _drbd_md_sync_page_io() submit */
	const char *current_use;
	atomic_t in_use;
	unsigned int done;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	char *why;
	enum bm_flag flags;
	int (*io_fn)(struct drbd_device *device, struct drbd_peer_device *peer_device);
	void (*done)(struct drbd_device *device, int rv);
};

struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total; /* sum of all values */
	int values[] __counted_by(size);
};
extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size);

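/*
 * Hedged sketch, not from the original source: with the flexible array
 * member above, an allocator like fifo_alloc() is typically written
 * with the struct_size() helper from <linux/overflow.h>:
 *
 *	struct fifo_buffer *fb;
 *
 *	fb = kzalloc(struct_size(fb, values, fifo_size), GFP_NOIO);
 *	if (fb)
 *		fb->size = fifo_size;
 *	return fb;
 */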
/* flag bits per connection */
enum {
	NET_CONGESTED,		/* The data socket is congested */
	RESOLVE_CONFLICTS,	/* Set on one node, cleared on the peer! */
	SEND_PING,
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, ping_wait gets woken */
	CONN_WD_ST_CHG_REQ,	/* A cluster wide state change on the connection is active */
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	STATE_SENT,		/* Do not change state/UUIDs while this is set */
	CALLBACK_PENDING,	/* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
				 * pending, from drbd worker context.
				 */
	DISCONNECT_SENT,

	DEVICE_WORK_PENDING,	/* tell worker that some device has pending work */
};

enum which_state { NOW, OLD = NOW, NEW };

struct drbd_resource {
	char *name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_res;
	struct dentry *debugfs_res_volumes;
	struct dentry *debugfs_res_connections;
	struct dentry *debugfs_res_in_flight_summary;
#endif
	struct kref kref;
	struct idr devices;		/* volume number to device mapping */
	struct list_head connections;
	struct list_head resources;
	struct res_opts res_opts;
	struct mutex conf_update;	/* mutex for read-copy-update of net_conf and disk_conf */
	struct mutex adm_mutex;		/* mutex to serialize administrative requests */
	spinlock_t req_lock;

	unsigned susp:1;		/* IO suspended by user */
	unsigned susp_nod:1;		/* IO suspended because no data */
	unsigned susp_fen:1;		/* IO suspended because fence peer handler runs */

	enum write_ordering_e write_ordering;

	cpumask_var_t cpu_mask;
};

struct drbd_thread_timing_details
{
	unsigned long start_jif;
	void *cb_addr;
	const char *caller_fn;
	unsigned int line;
	unsigned int cb_nr;
};

struct drbd_connection {
	struct list_head connections;
	struct drbd_resource *resource;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_conn;
	struct dentry *debugfs_conn_callback_history;
	struct dentry *debugfs_conn_oldest_requests;
#endif
	struct kref kref;
	struct idr peer_devices;	/* volume number to peer device mapping */
	enum drbd_conns cstate;		/* Only C_STANDALONE to C_WF_REPORT_PARAMS */
	struct mutex cstate_mutex;	/* Protects graceful disconnects */
	unsigned int connect_cnt;	/* Inc each time a connection is established */

	unsigned long flags;
	struct net_conf *net_conf;	/* content protected by rcu */
	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;	/* data/barrier/cstate/parameter packets */
	struct drbd_socket meta;	/* ping/ack (metadata) packets */
	int agreed_pro_version;		/* actually used protocol version */
	u32 agreed_features;
	unsigned long last_received;	/* in jiffies, either socket */
	unsigned int ko_count;

	struct list_head transfer_log;	/* all requests not yet fully processed */

	struct crypto_shash *cram_hmac_tfm;
	struct crypto_shash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
	struct crypto_shash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread  */
	struct crypto_shash *csums_tfm;
	struct crypto_shash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	/* receiver side */
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	atomic_t current_tle_nr;	/* transfer log epoch number */
	unsigned current_tle_writes;	/* writes seen within this tl epoch */

	unsigned long last_reconnect_jif;
	/* empty member on older kernels without blk_start_plug() */
	struct blk_plug receiver_plug;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread ack_receiver;
	struct workqueue_struct *ack_sender;

	/* cached pointers,
	 * so we can look up the oldest pending requests more quickly.
	 * protected by resource->req_lock */
	struct drbd_request *req_next; /* DRBD 9: todo.req_next */
	struct drbd_request *req_ack_pending;
	struct drbd_request *req_not_net_done;

	/* sender side */
	struct drbd_work_queue sender_work;

#define DRBD_THREAD_DETAILS_HIST	16
	unsigned int w_cb_nr; /* keeps counting up */
	unsigned int r_cb_nr; /* keeps counting up */
	struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
	struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];

	struct {
		unsigned long last_sent_barrier_jif;

		/* whether this sender thread
		 * has processed a single write yet. */
		bool seen_any_write_yet;

		/* Which barrier number to send with the next P_BARRIER */
		int current_epoch_nr;

		/* how many write requests have been sent
		 * with req->epoch == current_epoch_nr.
		 * If none, no P_BARRIER will be sent. */
		unsigned current_epoch_writes;
	} send;
};

static inline bool has_net_conf(struct drbd_connection *connection)
{
	bool has_net_conf;

	rcu_read_lock();
	has_net_conf = rcu_dereference(connection->net_conf);
	rcu_read_unlock();

	return has_net_conf;
}
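/*
 * Hedged sketch, not from the original source: readers that need an
 * actual value out of net_conf follow the same RCU pattern, copying
 * what they need before dropping the read lock.  The "timeout" member
 * is assumed here for illustration:
 *
 *	int timeout;
 *
 *	rcu_read_lock();
 *	timeout = rcu_dereference(connection->net_conf)->timeout;
 *	rcu_read_unlock();
 */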

void __update_timing_details(
		struct drbd_thread_timing_details *tdp,
		unsigned int *cb_nr,
		void *cb,
		const char *fn, const unsigned int line);

#define update_worker_timing_details(c, cb) \
	__update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
#define update_receiver_timing_details(c, cb) \
	__update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )
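/*
 * Hedged usage sketch, not from the original source: a thread loop
 * presumably records each callback it is about to invoke, so debugfs
 * can show the recent callback history:
 *
 *	update_worker_timing_details(connection, w->cb);
 *	w->cb(w, cancel);
 */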

struct submit_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	/* protected by ..->resource->req_lock */
	struct list_head writes;
};

struct drbd_peer_device {
	struct list_head peer_devices;
	struct drbd_device *device;
	struct drbd_connection *connection;
	struct work_struct send_acks_work;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_peer_dev;
#endif
};

struct drbd_device {
	struct drbd_resource *resource;
	struct list_head peer_devices;
	struct list_head pending_bitmap_io;

	unsigned long flush_jif;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_minor;
	struct dentry *debugfs_vol;
	struct dentry *debugfs_vol_oldest_requests;
	struct dentry *debugfs_vol_act_log_extents;
	struct dentry *debugfs_vol_resync_extents;
	struct dentry *debugfs_vol_data_gen_id;
	struct dentry *debugfs_vol_ed_gen_id;
#endif

	unsigned int vnr;	/* volume number within the connection */
	unsigned int minor;	/* device minor number */

	struct kref kref;

	/* things that are stored as / read from meta data on disk */
	unsigned long flags;

	/* configured by drbdsetup */
	struct drbd_backing_dev *ldev;

	sector_t p_size;     /* partner's disk size */
	struct request_queue *rq_queue;
	struct gendisk	    *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work resync_work;
	struct drbd_work unplug_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;

	/* Used after attach while negotiating new disk state. */
	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;  /* upon each state change. */
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
	atomic_t ap_actlog_cnt;  /* Requests waiting for activity log */
	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
	atomic_t unacked_cnt;	 /* Need to send replies for */
	atomic_t local_cnt;	 /* Waiting for local completion */
	atomic_t suspend_cnt;

	/* Interval tree of pending local requests */
	struct rb_root read_requests;
	struct rb_root write_requests;

	/* for statistics and timeouts */
	/* [0] read, [1] write */
	struct list_head pending_master_completion[2];
	struct list_head pending_completion[2];

	/* use checksums for *this* resync */
	bool use_csums;
	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of resync blocks that failed in this run */
	unsigned long rs_failed;
	/* Syncer's start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in PausedSyncX state [unit jiffies] */
	unsigned long rs_paused;
	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
	/* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	/* marks's time [unit jiffies] */
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	/* current index into rs_mark_{left,time} */
	int rs_last_mark;
	unsigned long rs_last_bcast; /* [unit jiffies] */

	/* where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	sector_t ov_stop_sector;
	/* where are we now? (sector) */
	sector_t ov_position;
	/* Start sector of out of sync range (to merge printk reporting). */
	sector_t ov_last_oos_start;
	/* size of out-of-sync range in sectors. */
	sector_t ov_last_oos_size;
	unsigned long ov_left; /* in bits */

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

	/* Used to track operations of resync... */
	struct lru_cache *resync;
	/* Number of locked elements in resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;

	struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
	struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
	struct list_head done_ee;   /* need to send P_WRITE_ACK */
	struct list_head read_ee;   /* [RS]P_DATA_REQUEST being read */
	struct list_head net_ee;    /* zero-copy network send in progress */

	struct list_head resync_reads;
	atomic_t pp_in_use;		/* allocated from page pool */
	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
	wait_queue_head_t ee_wait;
	struct drbd_md_io md_io;
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned long comm_bm_set; /* communicated number of set bits. */
	struct bm_io_work bm_io_work;
	u64 ed_uuid; /* UUID of the exposed data */
	struct mutex own_state_mutex;
	struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
	char congestion_reason;  /* Why we were congested... */
	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
	int rs_last_sect_ev; /* counter to compare with */
	int rs_last_events;  /* counter of read or write "events" (unit sectors)
			      * on the lower level device when we last looked. */
	int c_sync_rate; /* current resync rate after syncer throttle magic */
	struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, connection->conn_update) */
	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
	unsigned int peer_max_bio_size;
	unsigned int local_max_bio_size;

	/* any requests that would block in drbd_make_request()
	 * are deferred to this single-threaded work queue */
	struct submit_worker submit;
};

struct drbd_bm_aio_ctx {
	struct drbd_device *device;
	struct list_head list; /* on device->pending_bitmap_io */
	unsigned long start_jif;
	atomic_t in_flight;
	unsigned int done;
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
#define BM_AIO_WRITE_HINTED	2
#define BM_AIO_WRITE_ALL_PAGES	4
#define BM_AIO_READ		8
	int error;
	struct kref kref;
};

struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED		(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_device *device;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
};

minor_to_device(unsigned int minor)937b30ab791SAndreas Gruenbacher static inline struct drbd_device *minor_to_device(unsigned int minor)
938b411b363SPhilipp Reisner {
93905a10ec7SAndreas Gruenbacher 	return (struct drbd_device *)idr_find(&drbd_devices, minor);
940b411b363SPhilipp Reisner }
941b411b363SPhilipp Reisner 
first_peer_device(struct drbd_device * device)942a6b32bc3SAndreas Gruenbacher static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
943a6b32bc3SAndreas Gruenbacher {
944ec4a3407SLars Ellenberg 	return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
945a6b32bc3SAndreas Gruenbacher }
946a6b32bc3SAndreas Gruenbacher 
947a2972846SAndreas Gruenbacher static inline struct drbd_peer_device *
conn_peer_device(struct drbd_connection * connection,int volume_number)948a2972846SAndreas Gruenbacher conn_peer_device(struct drbd_connection *connection, int volume_number)
949a2972846SAndreas Gruenbacher {
950a2972846SAndreas Gruenbacher 	return idr_find(&connection->peer_devices, volume_number);
951a2972846SAndreas Gruenbacher }
952a2972846SAndreas Gruenbacher 
95377c556f6SAndreas Gruenbacher #define for_each_resource(resource, _resources) \
95477c556f6SAndreas Gruenbacher 	list_for_each_entry(resource, _resources, resources)
95577c556f6SAndreas Gruenbacher 
95677c556f6SAndreas Gruenbacher #define for_each_resource_rcu(resource, _resources) \
95777c556f6SAndreas Gruenbacher 	list_for_each_entry_rcu(resource, _resources, resources)
95877c556f6SAndreas Gruenbacher 
95977c556f6SAndreas Gruenbacher #define for_each_resource_safe(resource, tmp, _resources) \
96077c556f6SAndreas Gruenbacher 	list_for_each_entry_safe(resource, tmp, _resources, resources)
96177c556f6SAndreas Gruenbacher 
96277c556f6SAndreas Gruenbacher #define for_each_connection(connection, resource) \
96377c556f6SAndreas Gruenbacher 	list_for_each_entry(connection, &resource->connections, connections)
96477c556f6SAndreas Gruenbacher 
96577c556f6SAndreas Gruenbacher #define for_each_connection_rcu(connection, resource) \
96677c556f6SAndreas Gruenbacher 	list_for_each_entry_rcu(connection, &resource->connections, connections)
96777c556f6SAndreas Gruenbacher 
96877c556f6SAndreas Gruenbacher #define for_each_connection_safe(connection, tmp, resource) \
96977c556f6SAndreas Gruenbacher 	list_for_each_entry_safe(connection, tmp, &resource->connections, connections)
97077c556f6SAndreas Gruenbacher 
971a6b32bc3SAndreas Gruenbacher #define for_each_peer_device(peer_device, device) \
972a6b32bc3SAndreas Gruenbacher 	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)
973a6b32bc3SAndreas Gruenbacher 
974a6b32bc3SAndreas Gruenbacher #define for_each_peer_device_rcu(peer_device, device) \
975a6b32bc3SAndreas Gruenbacher 	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)
976a6b32bc3SAndreas Gruenbacher 
977a6b32bc3SAndreas Gruenbacher #define for_each_peer_device_safe(peer_device, tmp, device) \
978a6b32bc3SAndreas Gruenbacher 	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
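
/* Example (illustrative sketch, not a DRBD function): counting the peer
 * devices of a device with the iterator above.  Callers of the non-RCU
 * variant are expected to hold an appropriate lock. */
static inline int drbd_example_nr_peer_devices(struct drbd_device *device)
{
	struct drbd_peer_device *peer_device;
	int n = 0;

	for_each_peer_device(peer_device, device)
		n++;
	return n;
}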
979a6b32bc3SAndreas Gruenbacher 
980b30ab791SAndreas Gruenbacher static inline unsigned int device_to_minor(struct drbd_device *device)
981b411b363SPhilipp Reisner {
982b30ab791SAndreas Gruenbacher 	return device->minor;
983b411b363SPhilipp Reisner }
984b411b363SPhilipp Reisner 
985b411b363SPhilipp Reisner /*
986b411b363SPhilipp Reisner  * function declarations
987b411b363SPhilipp Reisner  *************************/
988b411b363SPhilipp Reisner 
989b411b363SPhilipp Reisner /* drbd_main.c */
990b411b363SPhilipp Reisner 
991e89b591cSPhilipp Reisner enum dds_flags {
992e89b591cSPhilipp Reisner 	DDSF_FORCED    = 1,
993e89b591cSPhilipp Reisner 	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
994e89b591cSPhilipp Reisner };
995e89b591cSPhilipp Reisner 
996b30ab791SAndreas Gruenbacher extern void drbd_init_set_defaults(struct drbd_device *device);
997b411b363SPhilipp Reisner extern int  drbd_thread_start(struct drbd_thread *thi);
998b411b363SPhilipp Reisner extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
999b411b363SPhilipp Reisner #ifdef CONFIG_SMP
100080822284SPhilipp Reisner extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
1001b411b363SPhilipp Reisner #else
1002b411b363SPhilipp Reisner #define drbd_thread_current_set_cpu(A) ({})
1003b411b363SPhilipp Reisner #endif
1004bde89a9eSAndreas Gruenbacher extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
1005b411b363SPhilipp Reisner 		       unsigned int set_size);
1006bde89a9eSAndreas Gruenbacher extern void tl_clear(struct drbd_connection *);
1007bde89a9eSAndreas Gruenbacher extern void drbd_free_sock(struct drbd_connection *connection);
1008bde89a9eSAndreas Gruenbacher extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
1009b411b363SPhilipp Reisner 		     void *buf, size_t size, unsigned msg_flags);
1010bde89a9eSAndreas Gruenbacher extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
1011fb708e40SAndreas Gruenbacher 			 unsigned);
1012fb708e40SAndreas Gruenbacher 
1013bde89a9eSAndreas Gruenbacher extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
1014bde89a9eSAndreas Gruenbacher extern int drbd_send_protocol(struct drbd_connection *connection);
101569a22773SAndreas Gruenbacher extern int drbd_send_uuids(struct drbd_peer_device *);
101669a22773SAndreas Gruenbacher extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
101769a22773SAndreas Gruenbacher extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
101869a22773SAndreas Gruenbacher extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
101969a22773SAndreas Gruenbacher extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
102069a22773SAndreas Gruenbacher extern int drbd_send_current_state(struct drbd_peer_device *);
102169a22773SAndreas Gruenbacher extern int drbd_send_sync_param(struct drbd_peer_device *);
1022bde89a9eSAndreas Gruenbacher extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
1023b411b363SPhilipp Reisner 			    u32 set_size);
102469a22773SAndreas Gruenbacher extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
1025f6ffca9fSAndreas Gruenbacher 			 struct drbd_peer_request *);
102669a22773SAndreas Gruenbacher extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
1027b411b363SPhilipp Reisner 			     struct p_block_req *rp);
102869a22773SAndreas Gruenbacher extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
10292b2bf214SLars Ellenberg 			     struct p_data *dp, int data_size);
103069a22773SAndreas Gruenbacher extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
1031b411b363SPhilipp Reisner 			    sector_t sector, int blksize, u64 block_id);
103269a22773SAndreas Gruenbacher extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
103369a22773SAndreas Gruenbacher extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
1034f6ffca9fSAndreas Gruenbacher 			   struct drbd_peer_request *);
103569a22773SAndreas Gruenbacher extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
103669a22773SAndreas Gruenbacher extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
1037b411b363SPhilipp Reisner 			      sector_t sector, int size, u64 block_id);
103869a22773SAndreas Gruenbacher extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
1039d8763023SAndreas Gruenbacher 				   int size, void *digest, int digest_size,
1040d8763023SAndreas Gruenbacher 				   enum drbd_packet cmd);
104169a22773SAndreas Gruenbacher extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);
1042b411b363SPhilipp Reisner 
10438164dd6cSAndreas Gruenbacher extern int drbd_send_bitmap(struct drbd_device *device, struct drbd_peer_device *peer_device);
104469a22773SAndreas Gruenbacher extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
1045bde89a9eSAndreas Gruenbacher extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
1046700ca8c0SPhilipp Reisner extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
104763a7c8adSLars Ellenberg extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
1048b30ab791SAndreas Gruenbacher extern void drbd_device_cleanup(struct drbd_device *device);
1049c51a0ef3SLars Ellenberg extern void drbd_print_uuids(struct drbd_device *device, const char *text);
1050c51a0ef3SLars Ellenberg extern void drbd_queue_unplug(struct drbd_device *device);
1051b411b363SPhilipp Reisner 
1052bde89a9eSAndreas Gruenbacher extern void conn_md_sync(struct drbd_connection *connection);
1053b30ab791SAndreas Gruenbacher extern void drbd_md_write(struct drbd_device *device, void *buffer);
1054b30ab791SAndreas Gruenbacher extern void drbd_md_sync(struct drbd_device *device);
1055b30ab791SAndreas Gruenbacher extern int  drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
1056b30ab791SAndreas Gruenbacher extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1057b30ab791SAndreas Gruenbacher extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1058b30ab791SAndreas Gruenbacher extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
1059b30ab791SAndreas Gruenbacher extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
1060b30ab791SAndreas Gruenbacher extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
1061b30ab791SAndreas Gruenbacher extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1062b30ab791SAndreas Gruenbacher extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
1063b30ab791SAndreas Gruenbacher extern void drbd_md_clear_flag(struct drbd_device *device, int flags)__must_hold(local);
1064b411b363SPhilipp Reisner extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
1065b30ab791SAndreas Gruenbacher extern void drbd_md_mark_dirty(struct drbd_device *device);
1066b30ab791SAndreas Gruenbacher extern void drbd_queue_bitmap_io(struct drbd_device *device,
10678164dd6cSAndreas Gruenbacher 				 int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
106854761697SAndreas Gruenbacher 				 void (*done)(struct drbd_device *, int),
10698164dd6cSAndreas Gruenbacher 				 char *why, enum bm_flag flags,
10708164dd6cSAndreas Gruenbacher 				 struct drbd_peer_device *peer_device);
1071b30ab791SAndreas Gruenbacher extern int drbd_bitmap_io(struct drbd_device *device,
10728164dd6cSAndreas Gruenbacher 		int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
10738164dd6cSAndreas Gruenbacher 		char *why, enum bm_flag flags,
10748164dd6cSAndreas Gruenbacher 		struct drbd_peer_device *peer_device);
1075b30ab791SAndreas Gruenbacher extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
10768164dd6cSAndreas Gruenbacher 		int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
10778164dd6cSAndreas Gruenbacher 		char *why, enum bm_flag flags,
10788164dd6cSAndreas Gruenbacher 		struct drbd_peer_device *peer_device);
10798164dd6cSAndreas Gruenbacher extern int drbd_bmio_set_n_write(struct drbd_device *device,
10808164dd6cSAndreas Gruenbacher 		struct drbd_peer_device *peer_device) __must_hold(local);
10818164dd6cSAndreas Gruenbacher extern int drbd_bmio_clear_n_write(struct drbd_device *device,
10828164dd6cSAndreas Gruenbacher 		struct drbd_peer_device *peer_device) __must_hold(local);
1083b411b363SPhilipp Reisner 
1084b411b363SPhilipp Reisner /* Meta data layout
1085ae8bf312SLars Ellenberg  *
1086ae8bf312SLars Ellenberg  * We currently have two possible layouts.
1087ae8bf312SLars Ellenberg  * Offsets in (512 byte) sectors.
1088ae8bf312SLars Ellenberg  * external:
1089ae8bf312SLars Ellenberg  *   |----------- md_size_sect ------------------|
1090ae8bf312SLars Ellenberg  *   [ 4k superblock ][ activity log ][  Bitmap  ]
1091ae8bf312SLars Ellenberg  *   | al_offset == 8 |
1092ae8bf312SLars Ellenberg  *   | bm_offset = al_offset + X      |
1093ae8bf312SLars Ellenberg  *  ==> bitmap sectors = md_size_sect - bm_offset
1094ae8bf312SLars Ellenberg  *
1095ae8bf312SLars Ellenberg  *  Variants:
1096ae8bf312SLars Ellenberg  *     old, indexed fixed size meta data:
1097ae8bf312SLars Ellenberg  *
1098ae8bf312SLars Ellenberg  * internal:
1099ae8bf312SLars Ellenberg  *            |----------- md_size_sect ------------------|
1100ae8bf312SLars Ellenberg  * [data.....][  Bitmap  ][ activity log ][ 4k superblock ][padding*]
1101ae8bf312SLars Ellenberg  *                        | al_offset < 0 |
1102ae8bf312SLars Ellenberg  *            | bm_offset = al_offset - Y |
1103ae8bf312SLars Ellenberg  *  ==> bitmap sectors = Y = al_offset - bm_offset
1104ae8bf312SLars Ellenberg  *
1105ae8bf312SLars Ellenberg  *  [padding*] are zero to 7 unused 512 Byte sectors at the
1106ae8bf312SLars Ellenberg  *  end of the device, so that the [4k superblock] will be 4k aligned.
1107ae8bf312SLars Ellenberg  *
1108ae8bf312SLars Ellenberg  *  The activity log consists of 4k transaction blocks,
1109ae8bf312SLars Ellenberg  *  which are written in a ring-buffer, or striped ring-buffer like fashion.
1110ae8bf312SLars Ellenberg  *  The activity log size used to be a fixed 32kB,
1111ae8bf312SLars Ellenberg  *  but is about to become configurable.
1112ae8bf312SLars Ellenberg  */
1113b411b363SPhilipp Reisner 
1114ae8bf312SLars Ellenberg /* Our old fixed size meta data layout
1115ae8bf312SLars Ellenberg  * allows up to about 3.8TB, so if you want more,
11167ad651b5SLars Ellenberg  * you need to use the "flexible" meta data format. */
1117ae8bf312SLars Ellenberg #define MD_128MB_SECT (128LLU << 11)  /* 128 MB, unit sectors */
1118ae8bf312SLars Ellenberg #define MD_4kB_SECT	 8
1119ae8bf312SLars Ellenberg #define MD_32kB_SECT	64
1120b411b363SPhilipp Reisner 
11217ad651b5SLars Ellenberg /* One activity log extent represents 4M of storage */
11227ad651b5SLars Ellenberg #define AL_EXTENT_SHIFT 22
1123b411b363SPhilipp Reisner #define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
1124b411b363SPhilipp Reisner 
11257ad651b5SLars Ellenberg /* We could make these currently hardcoded constants configurable
11267ad651b5SLars Ellenberg  * variables at create-md time (or even re-configurable at runtime?).
11277ad651b5SLars Ellenberg  * That will require some more changes to the DRBD "super block"
11287ad651b5SLars Ellenberg  * and attach code.
11297ad651b5SLars Ellenberg  *
11307ad651b5SLars Ellenberg  * updates per transaction:
11317ad651b5SLars Ellenberg  *   This many changes to the active set can be logged with one transaction.
11327ad651b5SLars Ellenberg  *   This number is arbitrary.
11337ad651b5SLars Ellenberg  * context per transaction:
11347ad651b5SLars Ellenberg  *   This many context extent numbers are logged with each transaction.
11357ad651b5SLars Ellenberg  *   This number results from the transaction block size (4k), the layout
11367ad651b5SLars Ellenberg  *   of the transaction header, and the number of updates per transaction.
11377ad651b5SLars Ellenberg  *   See drbd_actlog.c:struct al_transaction_on_disk
11387ad651b5SLars Ellenberg  * */
11397ad651b5SLars Ellenberg #define AL_UPDATES_PER_TRANSACTION	 64	/* arbitrary */
11407ad651b5SLars Ellenberg #define AL_CONTEXT_PER_TRANSACTION	919	/* (4096 - 36 - 6*64)/4 */
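
/* A minimal compile-time sanity sketch (illustration only, not part of
 * DRBD; assumes BUILD_BUG_ON() is reachable via the includes above):
 * the 919 is one 4k transaction block minus the 36 byte header, minus
 * 64 updates at 6 bytes each (__be16 slot nr + __be32 extent nr),
 * divided by 4 bytes per context extent number. */
static inline void drbd_example_al_layout_check(void)
{
	BUILD_BUG_ON(AL_CONTEXT_PER_TRANSACTION !=
		     (4096 - 36 - 6 * AL_UPDATES_PER_TRANSACTION) / 4);
}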
11417ad651b5SLars Ellenberg 
1142b411b363SPhilipp Reisner #if BITS_PER_LONG == 32
1143b411b363SPhilipp Reisner #define LN2_BPL 5
1144b411b363SPhilipp Reisner #define cpu_to_lel(A) cpu_to_le32(A)
1145b411b363SPhilipp Reisner #define lel_to_cpu(A) le32_to_cpu(A)
1146b411b363SPhilipp Reisner #elif BITS_PER_LONG == 64
1147b411b363SPhilipp Reisner #define LN2_BPL 6
1148b411b363SPhilipp Reisner #define cpu_to_lel(A) cpu_to_le64(A)
1149b411b363SPhilipp Reisner #define lel_to_cpu(A) le64_to_cpu(A)
1150b411b363SPhilipp Reisner #else
1151b411b363SPhilipp Reisner #error "LN2 of BITS_PER_LONG unknown!"
1152b411b363SPhilipp Reisner #endif
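
/* Usage note (illustrative): these "lel" (little endian long) helpers
 * convert whole bitmap words between CPU byte order and the
 * little-endian on-disk/on-the-wire layout, roughly
 *	buffer[i] = cpu_to_lel(bm_word);
 *	bm_word = lel_to_cpu(buffer[i]);
 * see drbd_bm_get_lel()/drbd_bm_merge_lel() below. */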
1153b411b363SPhilipp Reisner 
1154b411b363SPhilipp Reisner /* resync bitmap */
1155b411b363SPhilipp Reisner /* 16MB sized 'bitmap extent' to track syncer usage */
1156b411b363SPhilipp Reisner struct bm_extent {
1157b411b363SPhilipp Reisner 	int rs_left; /* number of bits set (out of sync) in this extent. */
1158b411b363SPhilipp Reisner 	int rs_failed; /* number of failed resync requests in this extent. */
1159b411b363SPhilipp Reisner 	unsigned long flags;
1160b411b363SPhilipp Reisner 	struct lc_element lce;
1161b411b363SPhilipp Reisner };
1162b411b363SPhilipp Reisner 
1163b411b363SPhilipp Reisner #define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
1164b411b363SPhilipp Reisner #define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */
1165e3555d85SPhilipp Reisner #define BME_PRIORITY   2  /* finish resync IO on this extent ASAP! App IO waiting! */
1166b411b363SPhilipp Reisner 
1167b411b363SPhilipp Reisner /* drbd_bitmap.c */
1168b411b363SPhilipp Reisner /*
1169b411b363SPhilipp Reisner  * We need to store one bit for a block.
1170b411b363SPhilipp Reisner  * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
1171b411b363SPhilipp Reisner  * Bit 0 ==> local node thinks this block is binary identical on both nodes
1172b411b363SPhilipp Reisner  * Bit 1 ==> local node thinks this block needs to be synced.
1173b411b363SPhilipp Reisner  */
1174b411b363SPhilipp Reisner 
11758e26f9ccSPhilipp Reisner #define SLEEP_TIME (HZ/10)
11768e26f9ccSPhilipp Reisner 
117745dfffebSLars Ellenberg /* We do bitmap IO in units of 4k blocks.
117845dfffebSLars Ellenberg  * We also still have a hardcoded 4k per bit relation. */
1179b411b363SPhilipp Reisner #define BM_BLOCK_SHIFT	12			 /* 4k per bit */
1180b411b363SPhilipp Reisner #define BM_BLOCK_SIZE	 (1<<BM_BLOCK_SHIFT)
118145dfffebSLars Ellenberg /* mostly arbitrarily set the represented size of one bitmap extent,
118245dfffebSLars Ellenberg  * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
118345dfffebSLars Ellenberg  * at 4k per bit resolution) */
118445dfffebSLars Ellenberg #define BM_EXT_SHIFT	 24	/* 16 MiB per resync extent */
1185b411b363SPhilipp Reisner #define BM_EXT_SIZE	 (1<<BM_EXT_SHIFT)
1186b411b363SPhilipp Reisner 
1187b411b363SPhilipp Reisner #if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
1188b411b363SPhilipp Reisner #error "HAVE YOU FIXED drbdmeta AS WELL??"
1189b411b363SPhilipp Reisner #endif
1190b411b363SPhilipp Reisner 
1191b411b363SPhilipp Reisner /* this many _storage_ sectors are described by one bit */
1192b411b363SPhilipp Reisner #define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
1193b411b363SPhilipp Reisner #define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
1194b411b363SPhilipp Reisner #define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)
1195b411b363SPhilipp Reisner 
1196b411b363SPhilipp Reisner /* bit to represented kilo byte conversion */
1197b411b363SPhilipp Reisner #define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
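
/* Example (illustrative sketch, not a DRBD function): how many bitmap
 * bits a device of a given capacity needs, rounding up to whole bits.
 * With the constants above, one bit covers BM_SECT_PER_BIT == 8 storage
 * sectors, so a 1 GiB device (2097152 sectors) needs 262144 bits, i.e.
 * the 32 KB bitmap from the example further up. */
static inline unsigned long drbd_example_capacity_to_bm_bits(sector_t capacity)
{
	return (unsigned long)((capacity + BM_SECT_PER_BIT - 1)
			       >> (BM_BLOCK_SHIFT - 9));
}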
1198b411b363SPhilipp Reisner 
1199b411b363SPhilipp Reisner /* in which _bitmap_ extent (resp. sector) the bit for a certain
1200b411b363SPhilipp Reisner  * _storage_ sector is located in */
1201b411b363SPhilipp Reisner #define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))
12025ab7d2c0SLars Ellenberg #define BM_BIT_TO_EXT(x)    ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
1203b411b363SPhilipp Reisner 
12045ab7d2c0SLars Ellenberg /* first storage sector a bitmap extent corresponds to */
1205b411b363SPhilipp Reisner #define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
12065ab7d2c0SLars Ellenberg /* how many _storage_ sectors we have per bitmap extent */
1207b411b363SPhilipp Reisner #define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)
12085ab7d2c0SLars Ellenberg /* how many bits are covered by one bitmap extent (resync extent) */
12095ab7d2c0SLars Ellenberg #define BM_BITS_PER_EXT     (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
12105ab7d2c0SLars Ellenberg 
12115ab7d2c0SLars Ellenberg #define BM_BLOCKS_PER_BM_EXT_MASK  (BM_BITS_PER_EXT - 1)
12125ab7d2c0SLars Ellenberg 
1213b411b363SPhilipp Reisner 
1214b411b363SPhilipp Reisner /* in one sector of the bitmap, we have this many activity_log extents. */
1215b411b363SPhilipp Reisner #define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
1216b411b363SPhilipp Reisner 
1217b411b363SPhilipp Reisner /* the extent in "PER_EXTENT" below is an activity log extent
1218b411b363SPhilipp Reisner  * we need that many (long words/bytes) to store the bitmap
1219b411b363SPhilipp Reisner  *		     of one AL_EXTENT_SIZE chunk of storage.
1220b411b363SPhilipp Reisner  * we can store the bitmap for that many AL_EXTENTS within
1221b411b363SPhilipp Reisner  * one sector of the _on_disk_ bitmap:
1222b411b363SPhilipp Reisner  * bit	 0	  bit 37   bit 38	     bit (512*8)-1
1223b411b363SPhilipp Reisner  *	     ...|........|........|.. // ..|........|
1224b411b363SPhilipp Reisner  * sect. 0	 `296	  `304			   ^(512*8*8)-1
1225b411b363SPhilipp Reisner  *
1226b411b363SPhilipp Reisner #define BM_WORDS_PER_EXT    ( (AL_EXTENT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
1227b411b363SPhilipp Reisner #define BM_BYTES_PER_EXT    ( (AL_EXTENT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
1228b411b363SPhilipp Reisner #define BM_EXT_PER_SECT	    ( 512 / BM_BYTES_PER_EXT )	 //   4
1229b411b363SPhilipp Reisner  */
1230b411b363SPhilipp Reisner 
1231b411b363SPhilipp Reisner #define DRBD_MAX_SECTORS_32 (0xffffffffLU)
1232ae8bf312SLars Ellenberg /* we have a certain meta data variant that has a fixed on-disk size of 128
1233ae8bf312SLars Ellenberg  * MiB, of which 4k are our "superblock", and 32k are the fixed size activity
1234ae8bf312SLars Ellenberg  * log, leaving this many sectors for the bitmap.
1235ae8bf312SLars Ellenberg  */
1236ae8bf312SLars Ellenberg 
1237ae8bf312SLars Ellenberg #define DRBD_MAX_SECTORS_FIXED_BM \
1238ae8bf312SLars Ellenberg 	  ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
1239ae8bf312SLars Ellenberg #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_FIXED_BM
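
/* Worked out (informational): MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT
 * = 262144 - 64 - 8 = 262072 bitmap sectors remain.  Each 512 byte bitmap
 * sector holds 4096 bits at 4 KiB per bit, i.e. covers 16 MiB of storage,
 * which gives the upper bound on device size for this fixed-size layout. */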
1240b411b363SPhilipp Reisner /* 16 TB in units of sectors */
1241b411b363SPhilipp Reisner #if BITS_PER_LONG == 32
1242b411b363SPhilipp Reisner /* adjust by one page worth of bitmap,
1243b411b363SPhilipp Reisner  * so we won't wrap around in drbd_bm_find_next_bit.
1244b411b363SPhilipp Reisner  * you should use a 64bit OS for that much storage, anyway. */
1245b411b363SPhilipp Reisner #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
1246b411b363SPhilipp Reisner #else
12474b0715f0SLars Ellenberg /* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
12484b0715f0SLars Ellenberg #define DRBD_MAX_SECTORS_FLEX (1UL << 51)
12494b0715f0SLars Ellenberg /* corresponds to (1UL << 38) bits right now. */
1250b411b363SPhilipp Reisner #endif
1251b411b363SPhilipp Reisner 
12528bf223c2SMing Lei /* Estimate max bio size as 256 * PAGE_SIZE,
1253ea1754a0SKirill A. Shutemov  * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
125423361cf3SLars Ellenberg  * Since we may live in a mixed-platform cluster,
125523361cf3SLars Ellenberg  * we limit ourselves to a platform-agnostic constant here for now.
125623361cf3SLars Ellenberg  * A followup commit may allow even bigger BIO sizes,
125723361cf3SLars Ellenberg  * once we thought that through. */
125898683650SPhilipp Reisner #define DRBD_MAX_BIO_SIZE (1U << 20)
1259a8affc03SChristoph Hellwig #if DRBD_MAX_BIO_SIZE > (BIO_MAX_VECS << PAGE_SHIFT)
126023361cf3SLars Ellenberg #error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_VECS * PAGE_SIZE
126123361cf3SLars Ellenberg #endif
1262db141b2fSLars Ellenberg #define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)       /* Works always = 4k */
1263b411b363SPhilipp Reisner 
126498683650SPhilipp Reisner #define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
126598683650SPhilipp Reisner #define DRBD_MAX_BIO_SIZE_P95    (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */
1266b411b363SPhilipp Reisner 
1267505675f9SLars Ellenberg /* For now, don't allow more than half of what we can "activate" in one
1268505675f9SLars Ellenberg  * activity log transaction to be discarded in one go. We may need to rework
1269505675f9SLars Ellenberg  * drbd_al_begin_io() to allow for even larger discard ranges */
12709104d31aSLars Ellenberg #define DRBD_MAX_BATCH_BIO_SIZE	 (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
12719104d31aSLars Ellenberg #define DRBD_MAX_BBIO_SECTORS    (DRBD_MAX_BATCH_BIO_SIZE >> 9)
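
/* Worked out (informational): AL_UPDATES_PER_TRANSACTION/2 * AL_EXTENT_SIZE
 * = 32 * 4 MiB = 128 MiB per batched bio, or 262144 512-byte sectors. */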
1272a0fb3c47SLars Ellenberg 
1273b30ab791SAndreas Gruenbacher extern int  drbd_bm_init(struct drbd_device *device);
1274b30ab791SAndreas Gruenbacher extern int  drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
1275b30ab791SAndreas Gruenbacher extern void drbd_bm_cleanup(struct drbd_device *device);
1276b30ab791SAndreas Gruenbacher extern void drbd_bm_set_all(struct drbd_device *device);
1277b30ab791SAndreas Gruenbacher extern void drbd_bm_clear_all(struct drbd_device *device);
12784b0715f0SLars Ellenberg /* set/clear/test only a few bits at a time */
1279b411b363SPhilipp Reisner extern int  drbd_bm_set_bits(
1280b30ab791SAndreas Gruenbacher 		struct drbd_device *device, unsigned long s, unsigned long e);
1281b411b363SPhilipp Reisner extern int  drbd_bm_clear_bits(
1282b30ab791SAndreas Gruenbacher 		struct drbd_device *device, unsigned long s, unsigned long e);
12834b0715f0SLars Ellenberg extern int drbd_bm_count_bits(
1284b30ab791SAndreas Gruenbacher 	struct drbd_device *device, const unsigned long s, const unsigned long e);
12854b0715f0SLars Ellenberg /* bm_set_bits variant for use while holding drbd_bm_lock,
12864b0715f0SLars Ellenberg  * may process the whole bitmap in one go */
1287b30ab791SAndreas Gruenbacher extern void _drbd_bm_set_bits(struct drbd_device *device,
1288b411b363SPhilipp Reisner 		const unsigned long s, const unsigned long e);
1289b30ab791SAndreas Gruenbacher extern int  drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
1290b30ab791SAndreas Gruenbacher extern int  drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
12918164dd6cSAndreas Gruenbacher extern int  drbd_bm_read(struct drbd_device *device,
12928164dd6cSAndreas Gruenbacher 		struct drbd_peer_device *peer_device) __must_hold(local);
1293b30ab791SAndreas Gruenbacher extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
12948164dd6cSAndreas Gruenbacher extern int  drbd_bm_write(struct drbd_device *device,
12958164dd6cSAndreas Gruenbacher 		struct drbd_peer_device *peer_device) __must_hold(local);
129627ea1d87SLars Ellenberg extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
1297b30ab791SAndreas Gruenbacher extern int  drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
1298c7a58db4SLars Ellenberg extern int  drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
12998164dd6cSAndreas Gruenbacher extern int drbd_bm_write_all(struct drbd_device *device,
13008164dd6cSAndreas Gruenbacher 		struct drbd_peer_device *peer_device) __must_hold(local);
13018164dd6cSAndreas Gruenbacher extern int  drbd_bm_write_copy_pages(struct drbd_device *device,
13028164dd6cSAndreas Gruenbacher 		struct drbd_peer_device *peer_device) __must_hold(local);
1303b30ab791SAndreas Gruenbacher extern size_t	     drbd_bm_words(struct drbd_device *device);
1304b30ab791SAndreas Gruenbacher extern unsigned long drbd_bm_bits(struct drbd_device *device);
1305b30ab791SAndreas Gruenbacher extern sector_t      drbd_bm_capacity(struct drbd_device *device);
13064b0715f0SLars Ellenberg 
13074b0715f0SLars Ellenberg #define DRBD_END_OF_BITMAP	(~(unsigned long)0)
1308b30ab791SAndreas Gruenbacher extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1309b411b363SPhilipp Reisner /* bm_find_next variants for use while you hold drbd_bm_lock() */
1310b30ab791SAndreas Gruenbacher extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1311b30ab791SAndreas Gruenbacher extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
1312b30ab791SAndreas Gruenbacher extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
1313b30ab791SAndreas Gruenbacher extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
1314b411b363SPhilipp Reisner /* for receive_bitmap */
1315b30ab791SAndreas Gruenbacher extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
1316b411b363SPhilipp Reisner 		size_t number, unsigned long *buffer);
131719f843aaSLars Ellenberg /* for _drbd_send_bitmap */
1318b30ab791SAndreas Gruenbacher extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
1319b411b363SPhilipp Reisner 		size_t number, unsigned long *buffer);
1320b411b363SPhilipp Reisner 
1321b30ab791SAndreas Gruenbacher extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
1322b30ab791SAndreas Gruenbacher extern void drbd_bm_unlock(struct drbd_device *device);
1323b411b363SPhilipp Reisner /* drbd_main.c */
1324b411b363SPhilipp Reisner 
1325b411b363SPhilipp Reisner extern struct kmem_cache *drbd_request_cache;
13266c852becSAndreas Gruenbacher extern struct kmem_cache *drbd_ee_cache;	/* peer requests */
1327b411b363SPhilipp Reisner extern struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
1328b411b363SPhilipp Reisner extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
13290892fac8SKent Overstreet extern mempool_t drbd_request_mempool;
13300892fac8SKent Overstreet extern mempool_t drbd_ee_mempool;
1331b411b363SPhilipp Reisner 
13324281808fSLars Ellenberg /* drbd's page pool, used to buffer data received from the peer,
13334281808fSLars Ellenberg  * or data requested by the peer.
13344281808fSLars Ellenberg  *
13354281808fSLars Ellenberg  * This does not have an emergency reserve.
13364281808fSLars Ellenberg  *
13374281808fSLars Ellenberg  * When allocating from this pool, it first takes pages from the pool.
13384281808fSLars Ellenberg  * Only if the pool is depleted will it try to allocate from the system.
13394281808fSLars Ellenberg  *
13404281808fSLars Ellenberg  * The assumption is that pages taken from this pool will be processed,
13414281808fSLars Ellenberg  * and given back, "quickly", and then can be recycled, so we can avoid
13424281808fSLars Ellenberg  * frequent calls to alloc_page(), and still will be able to make progress even
13434281808fSLars Ellenberg  * under memory pressure.
13444281808fSLars Ellenberg  */
13454281808fSLars Ellenberg extern struct page *drbd_pp_pool;
1346b411b363SPhilipp Reisner extern spinlock_t   drbd_pp_lock;
1347b411b363SPhilipp Reisner extern int	    drbd_pp_vacant;
1348b411b363SPhilipp Reisner extern wait_queue_head_t drbd_pp_wait;
1349b411b363SPhilipp Reisner 
13504281808fSLars Ellenberg /* We also need a standard (emergency-reserve backed) page pool
13514281808fSLars Ellenberg  * for meta data IO (activity log, bitmap).
13524281808fSLars Ellenberg  * We can keep it global, as long as it is used as "N pages at a time".
13534281808fSLars Ellenberg  * 128 should be plenty; currently we can probably get away with as few as 1.
13544281808fSLars Ellenberg  */
13554281808fSLars Ellenberg #define DRBD_MIN_POOL_PAGES	128
13560892fac8SKent Overstreet extern mempool_t drbd_md_io_page_pool;
13574281808fSLars Ellenberg 
13589476f39dSLars Ellenberg /* We also need to make sure we get a bio
13599476f39dSLars Ellenberg  * when we need it for housekeeping purposes */
13600892fac8SKent Overstreet extern struct bio_set drbd_md_io_bio_set;
13619476f39dSLars Ellenberg 
13628cb0defbSNeilBrown /* And a bio_set for cloning */
13630892fac8SKent Overstreet extern struct bio_set drbd_io_bio_set;
13648cb0defbSNeilBrown 
136528bc3b8cSAndreas Gruenbacher extern struct mutex resources_mutex;
1366b411b363SPhilipp Reisner 
1367a910b123SLars Ellenberg extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
136805a10ec7SAndreas Gruenbacher extern void drbd_destroy_device(struct kref *kref);
1369a910b123SLars Ellenberg extern void drbd_delete_device(struct drbd_device *device);
1370b411b363SPhilipp Reisner 
137177c556f6SAndreas Gruenbacher extern struct drbd_resource *drbd_create_resource(const char *name);
137277c556f6SAndreas Gruenbacher extern void drbd_free_resource(struct drbd_resource *resource);
137377c556f6SAndreas Gruenbacher 
1374eb6bea67SAndreas Gruenbacher extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
1375bde89a9eSAndreas Gruenbacher extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
137605a10ec7SAndreas Gruenbacher extern void drbd_destroy_connection(struct kref *kref);
1377bde89a9eSAndreas Gruenbacher extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
1378089c075dSAndreas Gruenbacher 					    void *peer_addr, int peer_addr_len);
13794bc76048SAndreas Gruenbacher extern struct drbd_resource *drbd_find_resource(const char *name);
138077c556f6SAndreas Gruenbacher extern void drbd_destroy_resource(struct kref *kref);
1381bde89a9eSAndreas Gruenbacher extern void conn_free_crypto(struct drbd_connection *connection);
1382b411b363SPhilipp Reisner 
1383b411b363SPhilipp Reisner /* drbd_req */
1384113fef9eSLars Ellenberg extern void do_submit(struct work_struct *ws);
1385370276baSGuoqing Jiang extern void __drbd_make_request(struct drbd_device *, struct bio *);
13863e08773cSChristoph Hellwig void drbd_submit_bio(struct bio *bio);
1387b411b363SPhilipp Reisner 
1388b411b363SPhilipp Reisner /* drbd_nl.c */
1389a2972846SAndreas Gruenbacher 
1390a2972846SAndreas Gruenbacher extern struct mutex notification_mutex;
1391a2972846SAndreas Gruenbacher 
1392b30ab791SAndreas Gruenbacher extern void drbd_suspend_io(struct drbd_device *device);
1393b30ab791SAndreas Gruenbacher extern void drbd_resume_io(struct drbd_device *device);
1394b411b363SPhilipp Reisner extern char *ppsize(char *buf, unsigned long long size);
139554761697SAndreas Gruenbacher extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
1396e96c9633SPhilipp Reisner enum determine_dev_size {
1397d752b269SPhilipp Reisner 	DS_ERROR_SHRINK = -3,
1398d752b269SPhilipp Reisner 	DS_ERROR_SPACE_MD = -2,
1399e96c9633SPhilipp Reisner 	DS_ERROR = -1,
1400e96c9633SPhilipp Reisner 	DS_UNCHANGED = 0,
1401e96c9633SPhilipp Reisner 	DS_SHRUNK = 1,
140257737adcSPhilipp Reisner 	DS_GREW = 2,
140357737adcSPhilipp Reisner 	DS_GREW_FROM_ZERO = 3,
1404e96c9633SPhilipp Reisner };
1405d752b269SPhilipp Reisner extern enum determine_dev_size
140654761697SAndreas Gruenbacher drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
140754761697SAndreas Gruenbacher extern void resync_after_online_grow(struct drbd_device *);
14089104d31aSLars Ellenberg extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
14099104d31aSLars Ellenberg 			struct drbd_backing_dev *bdev, struct o_qlim *o);
1410b30ab791SAndreas Gruenbacher extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
1411bf885f8aSAndreas Gruenbacher 					enum drbd_role new_role,
1412b411b363SPhilipp Reisner 					int force);
1413bde89a9eSAndreas Gruenbacher extern bool conn_try_outdate_peer(struct drbd_connection *connection);
1414bde89a9eSAndreas Gruenbacher extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
14157e5fec31SFabian Frederick extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
1416b30ab791SAndreas Gruenbacher extern int drbd_khelper(struct drbd_device *device, char *cmd);
1417b411b363SPhilipp Reisner 
1418b411b363SPhilipp Reisner /* drbd_worker.c */
1419d40e5671SPhilipp Reisner /* bi_end_io handlers */
14204246a0b6SChristoph Hellwig extern void drbd_md_endio(struct bio *bio);
14214246a0b6SChristoph Hellwig extern void drbd_peer_request_endio(struct bio *bio);
14224246a0b6SChristoph Hellwig extern void drbd_request_endio(struct bio *bio);
1423b411b363SPhilipp Reisner extern int drbd_worker(struct drbd_thread *thi);
1424b30ab791SAndreas Gruenbacher enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
1425b30ab791SAndreas Gruenbacher void drbd_resync_after_changed(struct drbd_device *device);
1426b30ab791SAndreas Gruenbacher extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
1427b30ab791SAndreas Gruenbacher extern void resume_next_sg(struct drbd_device *device);
1428b30ab791SAndreas Gruenbacher extern void suspend_other_sg(struct drbd_device *device);
14290d11f3cfSChristoph Böhmwalder extern int drbd_resync_finished(struct drbd_peer_device *peer_device);
1430b411b363SPhilipp Reisner /* maybe rather drbd_main.c ? */
1431e37d2438SLars Ellenberg extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
1432b30ab791SAndreas Gruenbacher extern void drbd_md_put_buffer(struct drbd_device *device);
1433b30ab791SAndreas Gruenbacher extern int drbd_md_sync_page_io(struct drbd_device *device,
14349945172aSBart Van Assche 		struct drbd_backing_dev *bdev, sector_t sector, enum req_op op);
14350d11f3cfSChristoph Böhmwalder extern void drbd_ov_out_of_sync_found(struct drbd_peer_device *peer_device,
14360d11f3cfSChristoph Böhmwalder 		sector_t sector, int size);
1437b30ab791SAndreas Gruenbacher extern void wait_until_done_or_force_detached(struct drbd_device *device,
143844edfb0dSLars Ellenberg 		struct drbd_backing_dev *bdev, unsigned int *done);
14390d11f3cfSChristoph Böhmwalder extern void drbd_rs_controller_reset(struct drbd_peer_device *peer_device);
1440b411b363SPhilipp Reisner 
14410d11f3cfSChristoph Böhmwalder static inline void ov_out_of_sync_print(struct drbd_peer_device *peer_device)
1442b411b363SPhilipp Reisner {
14430d11f3cfSChristoph Böhmwalder 	struct drbd_device *device = peer_device->device;
14440d11f3cfSChristoph Böhmwalder 
1445b30ab791SAndreas Gruenbacher 	if (device->ov_last_oos_size) {
14460d11f3cfSChristoph Böhmwalder 		drbd_err(peer_device, "Out of sync: start=%llu, size=%lu (sectors)\n",
1447b30ab791SAndreas Gruenbacher 		     (unsigned long long)device->ov_last_oos_start,
1448b30ab791SAndreas Gruenbacher 		     (unsigned long)device->ov_last_oos_size);
1449b411b363SPhilipp Reisner 	}
1450b30ab791SAndreas Gruenbacher 	device->ov_last_oos_size = 0;
1451b411b363SPhilipp Reisner }
1452b411b363SPhilipp Reisner 
1453b411b363SPhilipp Reisner 
14543d0e6375SKees Cook extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *);
14553d0e6375SKees Cook extern void drbd_csum_ee(struct crypto_shash *, struct drbd_peer_request *,
14563d0e6375SKees Cook 			 void *);
1457b411b363SPhilipp Reisner /* worker callbacks */
145899920dc5SAndreas Gruenbacher extern int w_e_end_data_req(struct drbd_work *, int);
145999920dc5SAndreas Gruenbacher extern int w_e_end_rsdata_req(struct drbd_work *, int);
146099920dc5SAndreas Gruenbacher extern int w_e_end_csum_rs_req(struct drbd_work *, int);
146199920dc5SAndreas Gruenbacher extern int w_e_end_ov_reply(struct drbd_work *, int);
146299920dc5SAndreas Gruenbacher extern int w_e_end_ov_req(struct drbd_work *, int);
146399920dc5SAndreas Gruenbacher extern int w_ov_finished(struct drbd_work *, int);
146499920dc5SAndreas Gruenbacher extern int w_resync_timer(struct drbd_work *, int);
146599920dc5SAndreas Gruenbacher extern int w_send_write_hint(struct drbd_work *, int);
146699920dc5SAndreas Gruenbacher extern int w_send_dblock(struct drbd_work *, int);
146799920dc5SAndreas Gruenbacher extern int w_send_read_req(struct drbd_work *, int);
146899920dc5SAndreas Gruenbacher extern int w_restart_disk_io(struct drbd_work *, int);
14698f7bed77SAndreas Gruenbacher extern int w_send_out_of_sync(struct drbd_work *, int);
1470b411b363SPhilipp Reisner 
14712bccef39SKees Cook extern void resync_timer_fn(struct timer_list *t);
14722bccef39SKees Cook extern void start_resync_timer_fn(struct timer_list *t);
1473b411b363SPhilipp Reisner 
1474a0fb3c47SLars Ellenberg extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
1475a0fb3c47SLars Ellenberg 
1476b411b363SPhilipp Reisner /* drbd_receiver.c */
1477f31e583aSLars Ellenberg extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
1478f31e583aSLars Ellenberg 		sector_t start, unsigned int nr_sectors, int flags);
1479753c6191SAndreas Gruenbacher extern int drbd_receiver(struct drbd_thread *thi);
14801c03e520SPhilipp Reisner extern int drbd_ack_receiver(struct drbd_thread *thi);
1481668700b4SPhilipp Reisner extern void drbd_send_acks_wf(struct work_struct *ws);
1482e8299874SLars Ellenberg extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
14830d11f3cfSChristoph Böhmwalder extern bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector,
1484ad3fee79SLars Ellenberg 		bool throttle_if_app_is_waiting);
1485ce668b6dSChristoph Böhmwalder extern int drbd_submit_peer_request(struct drbd_peer_request *peer_req);
148654761697SAndreas Gruenbacher extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
148769a22773SAndreas Gruenbacher extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
14880db55363SAndreas Gruenbacher 						     sector_t, unsigned int,
14899104d31aSLars Ellenberg 						     unsigned int,
1490f6ffca9fSAndreas Gruenbacher 						     gfp_t) __must_hold(local);
149154761697SAndreas Gruenbacher extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
1492f6ffca9fSAndreas Gruenbacher 				 int);
14933967deb1SAndreas Gruenbacher #define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
14943967deb1SAndreas Gruenbacher #define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
149569a22773SAndreas Gruenbacher extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
1496b30ab791SAndreas Gruenbacher extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
149769a22773SAndreas Gruenbacher extern int drbd_connected(struct drbd_peer_device *);
1498b411b363SPhilipp Reisner 
1499d40e5671SPhilipp Reisner /* sets the number of 512 byte sectors of our virtual device */
1500d5412e8dSLars Ellenberg void drbd_set_my_capacity(struct drbd_device *device, sector_t size);
1501d40e5671SPhilipp Reisner 
1502d40e5671SPhilipp Reisner /*
1503d40e5671SPhilipp Reisner  * used to submit our private bio
1504d40e5671SPhilipp Reisner  */
1505ed00aabdSChristoph Hellwig static inline void drbd_submit_bio_noacct(struct drbd_device *device,
1506d40e5671SPhilipp Reisner 					     int fault_type, struct bio *bio)
1507d40e5671SPhilipp Reisner {
1508d40e5671SPhilipp Reisner 	__release(local);
1509309dca30SChristoph Hellwig 	if (!bio->bi_bdev) {
1510309dca30SChristoph Hellwig 		drbd_err(device, "drbd_submit_bio_noacct: bio->bi_bdev == NULL\n");
15114e4cbee9SChristoph Hellwig 		bio->bi_status = BLK_STS_IOERR;
15124246a0b6SChristoph Hellwig 		bio_endio(bio);
1513d40e5671SPhilipp Reisner 		return;
1514d40e5671SPhilipp Reisner 	}
1515d40e5671SPhilipp Reisner 
1516d40e5671SPhilipp Reisner 	if (drbd_insert_fault(device, fault_type))
15174246a0b6SChristoph Hellwig 		bio_io_error(bio);
1518d40e5671SPhilipp Reisner 	else
1519ed00aabdSChristoph Hellwig 		submit_bio_noacct(bio);
1520d40e5671SPhilipp Reisner }
1521d40e5671SPhilipp Reisner 
15228fe39aacSPhilipp Reisner void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
15238fe39aacSPhilipp Reisner 			      enum write_ordering_e wo);
1524b411b363SPhilipp Reisner 
1525b411b363SPhilipp Reisner /* drbd_proc.c */
1526b411b363SPhilipp Reisner extern struct proc_dir_entry *drbd_proc;
1527004fd11dSChristoph Hellwig int drbd_seq_show(struct seq_file *seq, void *v);
1528b411b363SPhilipp Reisner 
1529b411b363SPhilipp Reisner /* drbd_actlog.c */
1530e4d7d6f4SLars Ellenberg extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
1531b30ab791SAndreas Gruenbacher extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
15324dd726f0SLars Ellenberg extern void drbd_al_begin_io_commit(struct drbd_device *device);
1533b30ab791SAndreas Gruenbacher extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
15344dd726f0SLars Ellenberg extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
1535b30ab791SAndreas Gruenbacher extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
1536b30ab791SAndreas Gruenbacher extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
1537b30ab791SAndreas Gruenbacher extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
15380d11f3cfSChristoph Böhmwalder extern int drbd_try_rs_begin_io(struct drbd_peer_device *peer_device, sector_t sector);
1539b30ab791SAndreas Gruenbacher extern void drbd_rs_cancel_all(struct drbd_device *device);
1540b30ab791SAndreas Gruenbacher extern int drbd_rs_del_all(struct drbd_device *device);
15410d11f3cfSChristoph Böhmwalder extern void drbd_rs_failed_io(struct drbd_peer_device *peer_device,
1542b411b363SPhilipp Reisner 		sector_t sector, int size);
15430d11f3cfSChristoph Böhmwalder extern void drbd_advance_rs_marks(struct drbd_peer_device *peer_device, unsigned long still_to_go);
15445ab7d2c0SLars Ellenberg 
15455ab7d2c0SLars Ellenberg enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
15460d11f3cfSChristoph Böhmwalder extern int __drbd_change_sync(struct drbd_peer_device *peer_device, sector_t sector, int size,
1547179e20b8SAndreas Gruenbacher 		enum update_sync_bits_mode mode);
15480d11f3cfSChristoph Böhmwalder #define drbd_set_in_sync(peer_device, sector, size) \
15490d11f3cfSChristoph Böhmwalder 	__drbd_change_sync(peer_device, sector, size, SET_IN_SYNC)
15500d11f3cfSChristoph Böhmwalder #define drbd_set_out_of_sync(peer_device, sector, size) \
15510d11f3cfSChristoph Böhmwalder 	__drbd_change_sync(peer_device, sector, size, SET_OUT_OF_SYNC)
15520d11f3cfSChristoph Böhmwalder #define drbd_rs_failed_io(peer_device, sector, size) \
15530d11f3cfSChristoph Böhmwalder 	__drbd_change_sync(peer_device, sector, size, RECORD_RS_FAILED)
1554b30ab791SAndreas Gruenbacher extern void drbd_al_shrink(struct drbd_device *device);
15555f7c0124SLars Ellenberg extern int drbd_al_initialize(struct drbd_device *, void *);
1556b411b363SPhilipp Reisner 
1557b411b363SPhilipp Reisner /* drbd_nl.c */
15583b98c0c2SLars Ellenberg /* state info broadcast */
15593b98c0c2SLars Ellenberg struct sib_info {
15603b98c0c2SLars Ellenberg 	enum drbd_state_info_bcast_reason sib_reason;
15613b98c0c2SLars Ellenberg 	union {
15623b98c0c2SLars Ellenberg 		struct {
15633b98c0c2SLars Ellenberg 			char *helper_name;
15643b98c0c2SLars Ellenberg 			unsigned helper_exit_code;
15653b98c0c2SLars Ellenberg 		};
15663b98c0c2SLars Ellenberg 		struct {
15673b98c0c2SLars Ellenberg 			union drbd_state os;
15683b98c0c2SLars Ellenberg 			union drbd_state ns;
15693b98c0c2SLars Ellenberg 		};
15703b98c0c2SLars Ellenberg 	};
15713b98c0c2SLars Ellenberg };
1572b30ab791SAndreas Gruenbacher void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
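
/* Example (illustrative sketch, not a DRBD function): how a caller might
 * fill in struct sib_info to broadcast a state transition.  Assumes
 * SIB_STATE_CHANGE from enum drbd_state_info_bcast_reason; the real call
 * sites live in drbd_main.c and drbd_nl.c. */
static inline void drbd_example_bcast_state_change(struct drbd_device *device,
		union drbd_state os, union drbd_state ns)
{
	struct sib_info sib = {
		.sib_reason = SIB_STATE_CHANGE,
		.os = os,
		.ns = ns,
	};

	drbd_bcast_event(device, &sib);
}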
1573b411b363SPhilipp Reisner 
1574aadb22baSLv Yunlong extern int notify_resource_state(struct sk_buff *,
1575a2972846SAndreas Gruenbacher 				  unsigned int,
1576a2972846SAndreas Gruenbacher 				  struct drbd_resource *,
1577a2972846SAndreas Gruenbacher 				  struct resource_info *,
1578a2972846SAndreas Gruenbacher 				  enum drbd_notification_type);
1579aadb22baSLv Yunlong extern int notify_device_state(struct sk_buff *,
1580a2972846SAndreas Gruenbacher 				unsigned int,
1581a2972846SAndreas Gruenbacher 				struct drbd_device *,
1582a2972846SAndreas Gruenbacher 				struct device_info *,
1583a2972846SAndreas Gruenbacher 				enum drbd_notification_type);
1584aadb22baSLv Yunlong extern int notify_connection_state(struct sk_buff *,
1585a2972846SAndreas Gruenbacher 				    unsigned int,
1586a2972846SAndreas Gruenbacher 				    struct drbd_connection *,
1587a2972846SAndreas Gruenbacher 				    struct connection_info *,
1588a2972846SAndreas Gruenbacher 				    enum drbd_notification_type);
1589aadb22baSLv Yunlong extern int notify_peer_device_state(struct sk_buff *,
1590a2972846SAndreas Gruenbacher 				     unsigned int,
1591a2972846SAndreas Gruenbacher 				     struct drbd_peer_device *,
1592a2972846SAndreas Gruenbacher 				     struct peer_device_info *,
1593a2972846SAndreas Gruenbacher 				     enum drbd_notification_type);
1594a2972846SAndreas Gruenbacher extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
1595a2972846SAndreas Gruenbacher 			  struct drbd_connection *, const char *, int);
1596a2972846SAndreas Gruenbacher 
1597b411b363SPhilipp Reisner /*
1598b411b363SPhilipp Reisner  * inline helper functions
1599b411b363SPhilipp Reisner  *************************/
1600b411b363SPhilipp Reisner 
160145bb912bSLars Ellenberg /* see also page_chain_add and friends in drbd_receiver.c */
160245bb912bSLars Ellenberg static inline struct page *page_chain_next(struct page *page)
160345bb912bSLars Ellenberg {
160445bb912bSLars Ellenberg 	return (struct page *)page_private(page);
160545bb912bSLars Ellenberg }
160645bb912bSLars Ellenberg #define page_chain_for_each(page) \
160745bb912bSLars Ellenberg 	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
160845bb912bSLars Ellenberg 			page = page_chain_next(page))
160945bb912bSLars Ellenberg #define page_chain_for_each_safe(page, n) \
161045bb912bSLars Ellenberg 	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
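
/* Example (illustrative sketch, not a DRBD function): walking a page
 * chain with the iterator above, here simply to count its length. */
static inline int drbd_example_page_chain_len(struct page *page)
{
	int n = 0;

	page_chain_for_each(page)
		n++;
	return n;
}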
161145bb912bSLars Ellenberg 
161245bb912bSLars Ellenberg 
1613045417f7SAndreas Gruenbacher static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
161445bb912bSLars Ellenberg {
1615db830c46SAndreas Gruenbacher 	struct page *page = peer_req->pages;
161645bb912bSLars Ellenberg 	page_chain_for_each(page) {
161745bb912bSLars Ellenberg 		if (page_count(page) > 1)
161845bb912bSLars Ellenberg 			return 1;
161945bb912bSLars Ellenberg 	}
162045bb912bSLars Ellenberg 	return 0;
162145bb912bSLars Ellenberg }
162245bb912bSLars Ellenberg 
1623b30ab791SAndreas Gruenbacher static inline union drbd_state drbd_read_state(struct drbd_device *device)
1624b411b363SPhilipp Reisner {
16256bbf53caSAndreas Gruenbacher 	struct drbd_resource *resource = device->resource;
162678bae59bSPhilipp Reisner 	union drbd_state rv;
162778bae59bSPhilipp Reisner 
1628b30ab791SAndreas Gruenbacher 	rv.i = device->state.i;
16296bbf53caSAndreas Gruenbacher 	rv.susp = resource->susp;
16306bbf53caSAndreas Gruenbacher 	rv.susp_nod = resource->susp_nod;
16316bbf53caSAndreas Gruenbacher 	rv.susp_fen = resource->susp_fen;
163278bae59bSPhilipp Reisner 
163378bae59bSPhilipp Reisner 	return rv;
1634b411b363SPhilipp Reisner }
1635b411b363SPhilipp Reisner 
1636383606e0SLars Ellenberg enum drbd_force_detach_flags {
1637a2a3c74fSLars Ellenberg 	DRBD_READ_ERROR,
1638a2a3c74fSLars Ellenberg 	DRBD_WRITE_ERROR,
1639383606e0SLars Ellenberg 	DRBD_META_IO_ERROR,
1640383606e0SLars Ellenberg 	DRBD_FORCE_DETACH,
1641383606e0SLars Ellenberg };
1642383606e0SLars Ellenberg 
1643b411b363SPhilipp Reisner #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
1644b30ab791SAndreas Gruenbacher static inline void __drbd_chk_io_error_(struct drbd_device *device,
1645a2a3c74fSLars Ellenberg 		enum drbd_force_detach_flags df,
1646383606e0SLars Ellenberg 		const char *where)
1647b411b363SPhilipp Reisner {
1648daeda1ccSPhilipp Reisner 	enum drbd_io_error_p ep;
1649daeda1ccSPhilipp Reisner 
1650daeda1ccSPhilipp Reisner 	rcu_read_lock();
1651b30ab791SAndreas Gruenbacher 	ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
1652daeda1ccSPhilipp Reisner 	rcu_read_unlock();
1653daeda1ccSPhilipp Reisner 	switch (ep) {
1654daeda1ccSPhilipp Reisner 	case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
1655a2a3c74fSLars Ellenberg 		if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
1656e3fa02d7SChristoph Böhmwalder 			if (drbd_ratelimit())
1657d0180171SAndreas Gruenbacher 				drbd_err(device, "Local IO failed in %s.\n", where);
1658b30ab791SAndreas Gruenbacher 			if (device->state.disk > D_INCONSISTENT)
1659b30ab791SAndreas Gruenbacher 				_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
1660b411b363SPhilipp Reisner 			break;
1661b411b363SPhilipp Reisner 		}
1662df561f66SGustavo A. R. Silva 		fallthrough;	/* for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
1663b411b363SPhilipp Reisner 	case EP_DETACH:
1664b411b363SPhilipp Reisner 	case EP_CALL_HELPER:
1665a2a3c74fSLars Ellenberg 		/* Remember whether we saw a READ or WRITE error.
1666a2a3c74fSLars Ellenberg 		 *
1667a2a3c74fSLars Ellenberg 		 * Recovery of the affected area for WRITE failure is covered
1668a2a3c74fSLars Ellenberg 		 * by the activity log.
1669a2a3c74fSLars Ellenberg 		 * READ errors may fall outside that area though. Certain READ
1670a2a3c74fSLars Ellenberg 		 * errors can be "healed" by writing good data to the affected
1671a2a3c74fSLars Ellenberg 		 * blocks, which triggers block re-allocation in lower layers.
1672a2a3c74fSLars Ellenberg 		 *
1673a2a3c74fSLars Ellenberg 		 * If we cannot write the bitmap after a READ error,
1674a2a3c74fSLars Ellenberg 		 * we may need to trigger a full sync (see w_go_diskless()).
1675a2a3c74fSLars Ellenberg 		 *
1676a2a3c74fSLars Ellenberg 		 * Force-detach is not really an IO error, but rather a
1677a2a3c74fSLars Ellenberg 		 * desperate measure to try to deal with a completely
1678a2a3c74fSLars Ellenberg 		 * unresponsive lower level IO stack.
1679a2a3c74fSLars Ellenberg 		 * Still it should be treated as a WRITE error.
1680a2a3c74fSLars Ellenberg 		 *
1681a2a3c74fSLars Ellenberg 		 * Meta IO error is always WRITE error:
1682a2a3c74fSLars Ellenberg 		 * we read meta data only once during attach,
1683a2a3c74fSLars Ellenberg 		 * which will fail in case of errors.
1684a2a3c74fSLars Ellenberg 		 */
1685b30ab791SAndreas Gruenbacher 		set_bit(WAS_IO_ERROR, &device->flags);
1686a2a3c74fSLars Ellenberg 		if (df == DRBD_READ_ERROR)
1687b30ab791SAndreas Gruenbacher 			set_bit(WAS_READ_ERROR, &device->flags);
1688a2a3c74fSLars Ellenberg 		if (df == DRBD_FORCE_DETACH)
1689b30ab791SAndreas Gruenbacher 			set_bit(FORCE_DETACH, &device->flags);
1690b30ab791SAndreas Gruenbacher 		if (device->state.disk > D_FAILED) {
1691b30ab791SAndreas Gruenbacher 			_drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
1692d0180171SAndreas Gruenbacher 			drbd_err(device,
169382f59cc6SLars Ellenberg 				"Local IO failed in %s. Detaching...\n", where);
1694b411b363SPhilipp Reisner 		}
1695b411b363SPhilipp Reisner 		break;
1696b411b363SPhilipp Reisner 	}
1697b411b363SPhilipp Reisner }
1698b411b363SPhilipp Reisner 
1699b411b363SPhilipp Reisner /**
1700b411b363SPhilipp Reisner  * drbd_chk_io_error() - Handle the on_io_error setting, should be called from all io completion handlers
1701b30ab791SAndreas Gruenbacher  * @device:	 DRBD device.
1702b411b363SPhilipp Reisner  * @error:	 Error code passed to the IO completion callback
1703b411b363SPhilipp Reisner  * @forcedetach: Force detach. I.e. the error happened while accessing the meta data
1704b411b363SPhilipp Reisner  *
1705b411b363SPhilipp Reisner  * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
1706b411b363SPhilipp Reisner  */
1707b411b363SPhilipp Reisner #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
1708b30ab791SAndreas Gruenbacher static inline void drbd_chk_io_error_(struct drbd_device *device,
1709383606e0SLars Ellenberg 	int error, enum drbd_force_detach_flags forcedetach, const char *where)
1710b411b363SPhilipp Reisner {
1711b411b363SPhilipp Reisner 	if (error) {
1712b411b363SPhilipp Reisner 		unsigned long flags;
17130500813fSAndreas Gruenbacher 		spin_lock_irqsave(&device->resource->req_lock, flags);
1714b30ab791SAndreas Gruenbacher 		__drbd_chk_io_error_(device, forcedetach, where);
17150500813fSAndreas Gruenbacher 		spin_unlock_irqrestore(&device->resource->req_lock, flags);
1716b411b363SPhilipp Reisner 	}
1717b411b363SPhilipp Reisner }
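
/* Example (illustrative sketch, not a DRBD function): an IO completion
 * handler would route a failed data write through the on_io_error policy
 * like this; the real handlers are drbd_request_endio() and friends. */
static inline void drbd_example_complete_write(struct drbd_device *device,
		int error)
{
	drbd_chk_io_error(device, error, DRBD_WRITE_ERROR);
}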
1718b411b363SPhilipp Reisner 
1719b411b363SPhilipp Reisner 
1720b411b363SPhilipp Reisner /**
1721b411b363SPhilipp Reisner  * drbd_md_first_sector() - Returns the first sector number of the meta data area
1722b411b363SPhilipp Reisner  * @bdev:	Meta data block device.
1723b411b363SPhilipp Reisner  *
1724b411b363SPhilipp Reisner  * BTW, for internal meta data, this happens to be the maximum capacity
1725b411b363SPhilipp Reisner  * we could agree upon with our peer node.
1726b411b363SPhilipp Reisner  */
172768e41a43SLars Ellenberg static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
1728b411b363SPhilipp Reisner {
172968e41a43SLars Ellenberg 	switch (bdev->md.meta_dev_idx) {
1730b411b363SPhilipp Reisner 	case DRBD_MD_INDEX_INTERNAL:
1731b411b363SPhilipp Reisner 	case DRBD_MD_INDEX_FLEX_INT:
1732b411b363SPhilipp Reisner 		return bdev->md.md_offset + bdev->md.bm_offset;
1733b411b363SPhilipp Reisner 	case DRBD_MD_INDEX_FLEX_EXT:
1734b411b363SPhilipp Reisner 	default:
1735b411b363SPhilipp Reisner 		return bdev->md.md_offset;
1736b411b363SPhilipp Reisner 	}
1737b411b363SPhilipp Reisner }
1738b411b363SPhilipp Reisner 
1739b411b363SPhilipp Reisner /**
1740b411b363SPhilipp Reisner  * drbd_md_last_sector() - Return the last sector number of the meta data area
1741b411b363SPhilipp Reisner  * @bdev:	Meta data block device.
1742b411b363SPhilipp Reisner  */
1743b411b363SPhilipp Reisner static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
1744b411b363SPhilipp Reisner {
174568e41a43SLars Ellenberg 	switch (bdev->md.meta_dev_idx) {
1746b411b363SPhilipp Reisner 	case DRBD_MD_INDEX_INTERNAL:
1747b411b363SPhilipp Reisner 	case DRBD_MD_INDEX_FLEX_INT:
1748ae8bf312SLars Ellenberg 		return bdev->md.md_offset + MD_4kB_SECT - 1;
1749b411b363SPhilipp Reisner 	case DRBD_MD_INDEX_FLEX_EXT:
1750b411b363SPhilipp Reisner 	default:
1751ae8bf312SLars Ellenberg 		return bdev->md.md_offset + bdev->md.md_size_sect - 1;
1752b411b363SPhilipp Reisner 	}
1753b411b363SPhilipp Reisner }
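
/*
 * Hedged helper sketch (illustration only): the meta data area spans
 * [drbd_md_first_sector(), drbd_md_last_sector()] inclusive, so its size
 * in 512-byte sectors follows directly.  "drbd_md_area_sectors" is a
 * hypothetical name, not an existing helper.
 *
 *	static inline sector_t drbd_md_area_sectors(struct drbd_backing_dev *bdev)
 *	{
 *		return drbd_md_last_sector(bdev) - drbd_md_first_sector(bdev) + 1;
 *	}
 */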
1754b411b363SPhilipp Reisner 
1755b411b363SPhilipp Reisner /* Returns the number of 512 byte sectors of the device */
1756b411b363SPhilipp Reisner static inline sector_t drbd_get_capacity(struct block_device *bdev)
1757b411b363SPhilipp Reisner {
1758da7b3924SChristoph Hellwig 	return bdev ? bdev_nr_sectors(bdev) : 0;
1759b411b363SPhilipp Reisner }
1760b411b363SPhilipp Reisner 
1761b411b363SPhilipp Reisner /**
1762b411b363SPhilipp Reisner  * drbd_get_max_capacity() - Returns the capacity we announce to our peer
1763b411b363SPhilipp Reisner  * @bdev:	Meta data block device.
1764b411b363SPhilipp Reisner  *
1765b411b363SPhilipp Reisner  * Returns the capacity we announce to our peer.  We clip ourselves at the
1766b411b363SPhilipp Reisner  * various MAX_SECTORS, because if we don't, the current implementation will
1767b411b363SPhilipp Reisner  * oops sooner or later.
1768b411b363SPhilipp Reisner  */
1769b411b363SPhilipp Reisner static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
1770b411b363SPhilipp Reisner {
1771b411b363SPhilipp Reisner 	sector_t s;
1772daeda1ccSPhilipp Reisner 
177368e41a43SLars Ellenberg 	switch (bdev->md.meta_dev_idx) {
1774b411b363SPhilipp Reisner 	case DRBD_MD_INDEX_INTERNAL:
1775b411b363SPhilipp Reisner 	case DRBD_MD_INDEX_FLEX_INT:
1776b411b363SPhilipp Reisner 		s = drbd_get_capacity(bdev->backing_bdev)
1777b411b363SPhilipp Reisner 			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
177868e41a43SLars Ellenberg 				drbd_md_first_sector(bdev))
1779b411b363SPhilipp Reisner 			: 0;
1780b411b363SPhilipp Reisner 		break;
1781b411b363SPhilipp Reisner 	case DRBD_MD_INDEX_FLEX_EXT:
1782b411b363SPhilipp Reisner 		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1783b411b363SPhilipp Reisner 				drbd_get_capacity(bdev->backing_bdev));
1784b411b363SPhilipp Reisner 		/* clip at maximum size the meta device can support */
1785b411b363SPhilipp Reisner 		s = min_t(sector_t, s,
1786b411b363SPhilipp Reisner 			BM_EXT_TO_SECT(bdev->md.md_size_sect
1787b411b363SPhilipp Reisner 				     - bdev->md.bm_offset));
1788b411b363SPhilipp Reisner 		break;
1789b411b363SPhilipp Reisner 	default:
1790b411b363SPhilipp Reisner 		s = min_t(sector_t, DRBD_MAX_SECTORS,
1791b411b363SPhilipp Reisner 				drbd_get_capacity(bdev->backing_bdev));
1792b411b363SPhilipp Reisner 	}
1793b411b363SPhilipp Reisner 	return s;
1794b411b363SPhilipp Reisner }
1795b411b363SPhilipp Reisner 
1796b411b363SPhilipp Reisner /**
17973a4d4eb3SLars Ellenberg  * drbd_md_ss() - Return the sector number of our meta data super block
1798b411b363SPhilipp Reisner  * @bdev:	Meta data block device.
1799b411b363SPhilipp Reisner  */
18003a4d4eb3SLars Ellenberg static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
1801b411b363SPhilipp Reisner {
18023a4d4eb3SLars Ellenberg 	const int meta_dev_idx = bdev->md.meta_dev_idx;
1803daeda1ccSPhilipp Reisner 
18043a4d4eb3SLars Ellenberg 	if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
1805b411b363SPhilipp Reisner 		return 0;
18063a4d4eb3SLars Ellenberg 
18073a4d4eb3SLars Ellenberg 	/* Since drbd08, internal meta data is always "flexible".
1808ae8bf312SLars Ellenberg 	 * position: last 4k aligned block of 4k size */
18093a4d4eb3SLars Ellenberg 	if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
18103a4d4eb3SLars Ellenberg 	    meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
1811ae8bf312SLars Ellenberg 		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
18123a4d4eb3SLars Ellenberg 
18133a4d4eb3SLars Ellenberg 	/* external, some index; this is the old fixed size layout */
18143a4d4eb3SLars Ellenberg 	return MD_128MB_SECT * bdev->md.meta_dev_idx;
1815b411b363SPhilipp Reisner }
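
/*
 * Worked example (hedged; the device size is assumed): with internal meta
 * data on a backing device of 2147483648 sectors (1 TiB), the super block
 * is placed in the last 4k-aligned 4k block:
 *
 *	(2147483648 & ~7ULL) - 8 = 2147483640
 *
 * i.e. it occupies sectors 2147483640..2147483647, one 4k block of eight
 * 512-byte sectors.
 */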
1816b411b363SPhilipp Reisner 
1817b411b363SPhilipp Reisner static inline void
1818b411b363SPhilipp Reisner drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
1819b411b363SPhilipp Reisner {
1820b411b363SPhilipp Reisner 	unsigned long flags;
1821b411b363SPhilipp Reisner 	spin_lock_irqsave(&q->q_lock, flags);
1822b411b363SPhilipp Reisner 	list_add_tail(&w->list, &q->q);
1823b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&q->q_lock, flags);
18248c0785a5SLars Ellenberg 	wake_up(&q->q_wait);
1825b411b363SPhilipp Reisner }
1826b411b363SPhilipp Reisner 
1827e334f550SLars Ellenberg static inline void
182915e26f6aSLars Ellenberg drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
182915e26f6aSLars Ellenberg {
183015e26f6aSLars Ellenberg 	unsigned long flags;
183115e26f6aSLars Ellenberg 	spin_lock_irqsave(&q->q_lock, flags);
183215e26f6aSLars Ellenberg 	if (list_empty_careful(&w->list))
183315e26f6aSLars Ellenberg 		list_add_tail(&w->list, &q->q);
183415e26f6aSLars Ellenberg 	spin_unlock_irqrestore(&q->q_lock, flags);
183515e26f6aSLars Ellenberg 	wake_up(&q->q_wait);
183615e26f6aSLars Ellenberg }
183715e26f6aSLars Ellenberg 
183815e26f6aSLars Ellenberg static inline void
1839e334f550SLars Ellenberg drbd_device_post_work(struct drbd_device *device, int work_bit)
1840e334f550SLars Ellenberg {
1841e334f550SLars Ellenberg 	if (!test_and_set_bit(work_bit, &device->flags)) {
1842e334f550SLars Ellenberg 		struct drbd_connection *connection =
1843e334f550SLars Ellenberg 			first_peer_device(device)->connection;
1844e334f550SLars Ellenberg 		struct drbd_work_queue *q = &connection->sender_work;
1845e334f550SLars Ellenberg 		if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
1846e334f550SLars Ellenberg 			wake_up(&q->q_wait);
1847e334f550SLars Ellenberg 	}
1848e334f550SLars Ellenberg }
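
/*
 * Hedged usage sketch (illustration only): a work item is a struct
 * drbd_work with its callback set, queued for example on a connection's
 * sender_work.  "example_work_fn" is hypothetical; the callback signature
 * is assumed to be the int (*cb)(struct drbd_work *, int cancel) used
 * throughout this file.
 *
 *	static int example_work_fn(struct drbd_work *w, int cancel)
 *	{
 *		if (!cancel)
 *			;	// do the actual work here
 *		return 0;
 *	}
 *
 *	w->cb = example_work_fn;
 *	drbd_queue_work(&connection->sender_work, w);
 */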
1849e334f550SLars Ellenberg 
1850b5043c5eSAndreas Gruenbacher extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
1851b5043c5eSAndreas Gruenbacher 
1852668700b4SPhilipp Reisner /* To get the ack_receiver out of the blocking network stack,
1853668700b4SPhilipp Reisner  * so it can change its sk_rcvtimeo from idle- to ping-timeout,
1854668700b4SPhilipp Reisner  * and send a ping, we need to send a signal.
1855668700b4SPhilipp Reisner  * Which signal we send is irrelevant. */
1856668700b4SPhilipp Reisner static inline void wake_ack_receiver(struct drbd_connection *connection)
1857b411b363SPhilipp Reisner {
1858668700b4SPhilipp Reisner 	struct task_struct *task = connection->ack_receiver.task;
1859668700b4SPhilipp Reisner 	if (task && get_t_state(&connection->ack_receiver) == RUNNING)
1860fee10990SEric W. Biederman 		send_sig(SIGXCPU, task, 1);
1861b411b363SPhilipp Reisner }
1862b411b363SPhilipp Reisner 
1863bde89a9eSAndreas Gruenbacher static inline void request_ping(struct drbd_connection *connection)
1864b411b363SPhilipp Reisner {
1865bde89a9eSAndreas Gruenbacher 	set_bit(SEND_PING, &connection->flags);
1866668700b4SPhilipp Reisner 	wake_ack_receiver(connection);
1867b411b363SPhilipp Reisner }
1868b411b363SPhilipp Reisner 
1869bde89a9eSAndreas Gruenbacher extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
187069a22773SAndreas Gruenbacher extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
1871bde89a9eSAndreas Gruenbacher extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
1872dba58587SAndreas Gruenbacher 			     enum drbd_packet, unsigned int, void *,
1873dba58587SAndreas Gruenbacher 			     unsigned int);
187469a22773SAndreas Gruenbacher extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
1875dba58587SAndreas Gruenbacher 			     enum drbd_packet, unsigned int, void *,
1876dba58587SAndreas Gruenbacher 			     unsigned int);
1877b411b363SPhilipp Reisner 
1878bde89a9eSAndreas Gruenbacher extern int drbd_send_ping(struct drbd_connection *connection);
1879bde89a9eSAndreas Gruenbacher extern int drbd_send_ping_ack(struct drbd_connection *connection);
188069a22773SAndreas Gruenbacher extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
1881bde89a9eSAndreas Gruenbacher extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
1882b411b363SPhilipp Reisner 
1883b411b363SPhilipp Reisner static inline void drbd_thread_stop(struct drbd_thread *thi)
1884b411b363SPhilipp Reisner {
188581e84650SAndreas Gruenbacher 	_drbd_thread_stop(thi, false, true);
1886b411b363SPhilipp Reisner }
1887b411b363SPhilipp Reisner 
1888b411b363SPhilipp Reisner static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
1889b411b363SPhilipp Reisner {
189081e84650SAndreas Gruenbacher 	_drbd_thread_stop(thi, false, false);
1891b411b363SPhilipp Reisner }
1892b411b363SPhilipp Reisner 
1893b411b363SPhilipp Reisner static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
1894b411b363SPhilipp Reisner {
189581e84650SAndreas Gruenbacher 	_drbd_thread_stop(thi, true, false);
1896b411b363SPhilipp Reisner }
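
/*
 * Hedged summary (derived from the wrappers above; the two flag arguments
 * are assumed to mean "restart" and "wait"):
 *
 *	drbd_thread_stop(thi)		-> restart = false, wait = true
 *	drbd_thread_stop_nowait(thi)	-> restart = false, wait = false
 *	drbd_thread_restart_nowait(thi)	-> restart = true,  wait = false
 */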
1897b411b363SPhilipp Reisner 
1898b411b363SPhilipp Reisner /* counts how many answer packets we expect from our peer,
1899b411b363SPhilipp Reisner  * for either explicit application requests,
1900b411b363SPhilipp Reisner  * or implicit barrier packets as necessary.
1901b411b363SPhilipp Reisner  * increased:
1902b411b363SPhilipp Reisner  *  w_send_barrier
19038554df1cSAndreas Gruenbacher  *  _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
1904b411b363SPhilipp Reisner  *    it is much easier and equally valid to count what we queue for the
1905b411b363SPhilipp Reisner  *    worker, even before it actually was queued or sent.
1906b411b363SPhilipp Reisner  *    (drbd_make_request_common; recovery path on read io-error)
1907b411b363SPhilipp Reisner  * decreased:
1908b411b363SPhilipp Reisner  *  got_BarrierAck (respective tl_clear, tl_clear_barrier)
19098554df1cSAndreas Gruenbacher  *  _req_mod(req, DATA_RECEIVED)
1910b411b363SPhilipp Reisner  *     [from receive_DataReply]
19118554df1cSAndreas Gruenbacher  *  _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
1912b411b363SPhilipp Reisner  *     [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
1913b411b363SPhilipp Reisner  *     for some reason it is NOT decreased in got_NegAck,
1914b411b363SPhilipp Reisner  *     but in the resulting cleanup code from report_params.
1915b411b363SPhilipp Reisner  *     We should try to remember the reason for that...
19168554df1cSAndreas Gruenbacher  *  _req_mod(req, SEND_FAILED or SEND_CANCELED)
19178554df1cSAndreas Gruenbacher  *  _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
1918b411b363SPhilipp Reisner  *     [from tl_clear_barrier]
1919b411b363SPhilipp Reisner  */
1920b30ab791SAndreas Gruenbacher static inline void inc_ap_pending(struct drbd_device *device)
1921b411b363SPhilipp Reisner {
1922b30ab791SAndreas Gruenbacher 	atomic_inc(&device->ap_pending_cnt);
1923b411b363SPhilipp Reisner }
1924b411b363SPhilipp Reisner 
192533f7d316SAndreas Gruenbacher #define dec_ap_pending(device) ((void)expect((device), __dec_ap_pending(device) >= 0))
192633f7d316SAndreas Gruenbacher static inline int __dec_ap_pending(struct drbd_device *device)
192749559d87SPhilipp Reisner {
192833f7d316SAndreas Gruenbacher 	int ap_pending_cnt = atomic_dec_return(&device->ap_pending_cnt);
192933f7d316SAndreas Gruenbacher 
193033f7d316SAndreas Gruenbacher 	if (ap_pending_cnt == 0)
1931b30ab791SAndreas Gruenbacher 		wake_up(&device->misc_wait);
193233f7d316SAndreas Gruenbacher 	return ap_pending_cnt;
193349559d87SPhilipp Reisner }
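
/*
 * Hedged pairing sketch (illustration only): every increment when a
 * request is queued for the peer is balanced by exactly one decrement
 * once the answer (or a cleanup path) arrives; "device" stands for the
 * DRBD device in question.
 *
 *	inc_ap_pending(device);	// e.g. around QUEUE_FOR_NET_WRITE
 *	...
 *	dec_ap_pending(device);	// e.g. once P_WRITE_ACK came in
 */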
1934b411b363SPhilipp Reisner 
1935b411b363SPhilipp Reisner /* counts how many resync-related answers we still expect from the peer
1936b411b363SPhilipp Reisner  *		     increase			decrease
1937b411b363SPhilipp Reisner  * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
193825985edcSLucas De Marchi  * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
1939b411b363SPhilipp Reisner  *					   (or P_NEG_ACK with ID_SYNCER)
1940b411b363SPhilipp Reisner  */
19410d11f3cfSChristoph Böhmwalder static inline void inc_rs_pending(struct drbd_peer_device *peer_device)
1942b411b363SPhilipp Reisner {
19430d11f3cfSChristoph Böhmwalder 	atomic_inc(&peer_device->device->rs_pending_cnt);
1944b411b363SPhilipp Reisner }
1945b411b363SPhilipp Reisner 
19460d11f3cfSChristoph Böhmwalder #define dec_rs_pending(peer_device) \
19470d11f3cfSChristoph Böhmwalder 	((void)expect((peer_device), __dec_rs_pending(peer_device) >= 0))
19480d11f3cfSChristoph Böhmwalder static inline int __dec_rs_pending(struct drbd_peer_device *peer_device)
194949559d87SPhilipp Reisner {
19500d11f3cfSChristoph Böhmwalder 	return atomic_dec_return(&peer_device->device->rs_pending_cnt);
195149559d87SPhilipp Reisner }
1952b411b363SPhilipp Reisner 
1953b411b363SPhilipp Reisner /* counts how many answers we still need to send to the peer.
1954b411b363SPhilipp Reisner  * increased on
1955b411b363SPhilipp Reisner  *  receive_Data	unless protocol A;
1956b411b363SPhilipp Reisner  *			we need to send a P_RECV_ACK (proto B)
1957b411b363SPhilipp Reisner  *			or P_WRITE_ACK (proto C)
1958b411b363SPhilipp Reisner  *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
1959b411b363SPhilipp Reisner  *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
1960b411b363SPhilipp Reisner  *  receive_Barrier_*	we need to send a P_BARRIER_ACK
1961b411b363SPhilipp Reisner  */
1962b30ab791SAndreas Gruenbacher static inline void inc_unacked(struct drbd_device *device)
1963b411b363SPhilipp Reisner {
1964b30ab791SAndreas Gruenbacher 	atomic_inc(&device->unacked_cnt);
1965b411b363SPhilipp Reisner }
1966b411b363SPhilipp Reisner 
196733f7d316SAndreas Gruenbacher #define dec_unacked(device) ((void)expect(device, __dec_unacked(device) >= 0))
196833f7d316SAndreas Gruenbacher static inline int __dec_unacked(struct drbd_device *device)
1969b411b363SPhilipp Reisner {
197033f7d316SAndreas Gruenbacher 	return atomic_dec_return(&device->unacked_cnt);
1971b411b363SPhilipp Reisner }
1972b411b363SPhilipp Reisner 
197333f7d316SAndreas Gruenbacher #define sub_unacked(device, n) ((void)expect(device, __sub_unacked(device) >= 0))
197433f7d316SAndreas Gruenbacher static inline int __sub_unacked(struct drbd_device *device, int n)
1975b411b363SPhilipp Reisner {
197633f7d316SAndreas Gruenbacher 	return atomic_sub_return(n, &device->unacked_cnt);
1977b411b363SPhilipp Reisner }
1978b411b363SPhilipp Reisner 
19795052fee2SLars Ellenberg static inline bool is_sync_target_state(enum drbd_conns connection_state)
19805052fee2SLars Ellenberg {
19815052fee2SLars Ellenberg 	return	connection_state == C_SYNC_TARGET ||
19825052fee2SLars Ellenberg 		connection_state == C_PAUSED_SYNC_T;
19835052fee2SLars Ellenberg }
19845052fee2SLars Ellenberg 
19855052fee2SLars Ellenberg static inline bool is_sync_source_state(enum drbd_conns connection_state)
19865052fee2SLars Ellenberg {
19875052fee2SLars Ellenberg 	return	connection_state == C_SYNC_SOURCE ||
19885052fee2SLars Ellenberg 		connection_state == C_PAUSED_SYNC_S;
19895052fee2SLars Ellenberg }
19905052fee2SLars Ellenberg 
19915ab7d2c0SLars Ellenberg static inline bool is_sync_state(enum drbd_conns connection_state)
19925ab7d2c0SLars Ellenberg {
19935052fee2SLars Ellenberg 	return	is_sync_source_state(connection_state) ||
19945052fee2SLars Ellenberg 		is_sync_target_state(connection_state);
19955ab7d2c0SLars Ellenberg }
19965ab7d2c0SLars Ellenberg 
1997b411b363SPhilipp Reisner /**
1998b30ab791SAndreas Gruenbacher  * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
1999d1b80853SAndreas Gruenbacher  * @_device:		DRBD device.
2000d1b80853SAndreas Gruenbacher  * @_min_state:		Minimum device state required for success.
2001b411b363SPhilipp Reisner  *
2002b30ab791SAndreas Gruenbacher  * You have to call put_ldev() when finished working with device->ldev.
2003b411b363SPhilipp Reisner  */
2004d1b80853SAndreas Gruenbacher #define get_ldev_if_state(_device, _min_state)				\
2005d1b80853SAndreas Gruenbacher 	(_get_ldev_if_state((_device), (_min_state)) ?			\
2006d1b80853SAndreas Gruenbacher 	 ({ __acquire(x); true; }) : false)
2007d1b80853SAndreas Gruenbacher #define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)
2008b411b363SPhilipp Reisner 
2009b30ab791SAndreas Gruenbacher static inline void put_ldev(struct drbd_device *device)
2010b411b363SPhilipp Reisner {
201111f8b2b6SAndreas Gruenbacher 	enum drbd_disk_state disk_state = device->state.disk;
2012ba3c6fb8SLars Ellenberg 	/* We must check the state *before* the atomic_dec becomes visible,
2013ba3c6fb8SLars Ellenberg 	 * or we have a theoretical race where someone hitting zero,
2014ba3c6fb8SLars Ellenberg 	 * while the state is still D_FAILED, will then see D_DISKLESS in the
2015ba3c6fb8SLars Ellenberg 	 * condition below and call into destroy, which it must not do yet. */
2016b30ab791SAndreas Gruenbacher 	int i = atomic_dec_return(&device->local_cnt);
20179a0d9d03SLars Ellenberg 
20189a0d9d03SLars Ellenberg 	/* This may be called from some endio handler,
20199a0d9d03SLars Ellenberg 	 * so we must not sleep here. */
20209a0d9d03SLars Ellenberg 
2021b411b363SPhilipp Reisner 	__release(local);
20220b0ba1efSAndreas Gruenbacher 	D_ASSERT(device, i >= 0);
2023e9e6f3ecSLars Ellenberg 	if (i == 0) {
202411f8b2b6SAndreas Gruenbacher 		if (disk_state == D_DISKLESS)
202582f59cc6SLars Ellenberg 			/* even internal references gone, safe to destroy */
2026e334f550SLars Ellenberg 			drbd_device_post_work(device, DESTROY_DISK);
202711f8b2b6SAndreas Gruenbacher 		if (disk_state == D_FAILED)
202882f59cc6SLars Ellenberg 			/* all application IO references gone. */
2029e334f550SLars Ellenberg 			if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
2030e334f550SLars Ellenberg 				drbd_device_post_work(device, GO_DISKLESS);
2031b30ab791SAndreas Gruenbacher 		wake_up(&device->misc_wait);
2032b411b363SPhilipp Reisner 	}
2033e9e6f3ecSLars Ellenberg }
2034b411b363SPhilipp Reisner 
2035b411b363SPhilipp Reisner #ifndef __CHECKER__
2036b30ab791SAndreas Gruenbacher static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
2037b411b363SPhilipp Reisner {
2038b411b363SPhilipp Reisner 	int io_allowed;
2039b411b363SPhilipp Reisner 
204082f59cc6SLars Ellenberg 	/* never get a reference while D_DISKLESS */
2041b30ab791SAndreas Gruenbacher 	if (device->state.disk == D_DISKLESS)
204282f59cc6SLars Ellenberg 		return 0;
204382f59cc6SLars Ellenberg 
2044b30ab791SAndreas Gruenbacher 	atomic_inc(&device->local_cnt);
2045b30ab791SAndreas Gruenbacher 	io_allowed = (device->state.disk >= mins);
2046b411b363SPhilipp Reisner 	if (!io_allowed)
2047b30ab791SAndreas Gruenbacher 		put_ldev(device);
2048b411b363SPhilipp Reisner 	return io_allowed;
2049b411b363SPhilipp Reisner }
2050b411b363SPhilipp Reisner #else
2051b30ab791SAndreas Gruenbacher extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
2052b411b363SPhilipp Reisner #endif
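
/*
 * Hedged usage sketch (illustration only): all access to device->ldev is
 * bracketed by a successful get_ldev() and a matching put_ldev(), as the
 * kernel-doc above requires.
 *
 *	if (get_ldev(device)) {
 *		sector_t first = drbd_md_first_sector(device->ldev);
 *		// ... use device->ldev and "first" here ...
 *		put_ldev(device);
 *	}
 */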
2053b411b363SPhilipp Reisner 
2054b411b363SPhilipp Reisner /* this throttles on-the-fly application requests
2055b411b363SPhilipp Reisner  * according to max_buffers settings;
2056b411b363SPhilipp Reisner  * maybe re-implement using semaphores? */
2057b30ab791SAndreas Gruenbacher static inline int drbd_get_max_buffers(struct drbd_device *device)
2058b411b363SPhilipp Reisner {
205944ed167dSPhilipp Reisner 	struct net_conf *nc;
206044ed167dSPhilipp Reisner 	int mxb;
206144ed167dSPhilipp Reisner 
206244ed167dSPhilipp Reisner 	rcu_read_lock();
2063a6b32bc3SAndreas Gruenbacher 	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
206444ed167dSPhilipp Reisner 	mxb = nc ? nc->max_buffers : 1000000;  /* arbitrary limit on open requests */
206544ed167dSPhilipp Reisner 	rcu_read_unlock();
206644ed167dSPhilipp Reisner 
2067b411b363SPhilipp Reisner 	return mxb;
2068b411b363SPhilipp Reisner }
2069b411b363SPhilipp Reisner 
2070b30ab791SAndreas Gruenbacher static inline int drbd_state_is_stable(struct drbd_device *device)
2071b411b363SPhilipp Reisner {
2072b30ab791SAndreas Gruenbacher 	union drbd_dev_state s = device->state;
2073b411b363SPhilipp Reisner 
2074b411b363SPhilipp Reisner 	/* DO NOT add a default clause, we want the compiler to warn us
2075b411b363SPhilipp Reisner 	 * for any newly introduced state we may have forgotten to add here */
2076b411b363SPhilipp Reisner 
2077b411b363SPhilipp Reisner 	switch ((enum drbd_conns)s.conn) {
2078b411b363SPhilipp Reisner 	/* new io only accepted when there is no connection, ... */
2079b411b363SPhilipp Reisner 	case C_STANDALONE:
2080b411b363SPhilipp Reisner 	case C_WF_CONNECTION:
2081b411b363SPhilipp Reisner 	/* ... or there is a well established connection. */
2082b411b363SPhilipp Reisner 	case C_CONNECTED:
2083b411b363SPhilipp Reisner 	case C_SYNC_SOURCE:
2084b411b363SPhilipp Reisner 	case C_SYNC_TARGET:
2085b411b363SPhilipp Reisner 	case C_VERIFY_S:
2086b411b363SPhilipp Reisner 	case C_VERIFY_T:
2087b411b363SPhilipp Reisner 	case C_PAUSED_SYNC_S:
2088b411b363SPhilipp Reisner 	case C_PAUSED_SYNC_T:
208967531718SPhilipp Reisner 	case C_AHEAD:
209067531718SPhilipp Reisner 	case C_BEHIND:
20913719094eSPhilipp Reisner 		/* transitional states, IO allowed */
2092b411b363SPhilipp Reisner 	case C_DISCONNECTING:
2093b411b363SPhilipp Reisner 	case C_UNCONNECTED:
2094b411b363SPhilipp Reisner 	case C_TIMEOUT:
2095b411b363SPhilipp Reisner 	case C_BROKEN_PIPE:
2096b411b363SPhilipp Reisner 	case C_NETWORK_FAILURE:
2097b411b363SPhilipp Reisner 	case C_PROTOCOL_ERROR:
2098b411b363SPhilipp Reisner 	case C_TEAR_DOWN:
2099b411b363SPhilipp Reisner 	case C_WF_REPORT_PARAMS:
2100b411b363SPhilipp Reisner 	case C_STARTING_SYNC_S:
2101b411b363SPhilipp Reisner 	case C_STARTING_SYNC_T:
21023719094eSPhilipp Reisner 		break;
21033719094eSPhilipp Reisner 
21043719094eSPhilipp Reisner 		/* Allow IO in BM exchange states with new protocols */
2105b411b363SPhilipp Reisner 	case C_WF_BITMAP_S:
2106a6b32bc3SAndreas Gruenbacher 		if (first_peer_device(device)->connection->agreed_pro_version < 96)
21073719094eSPhilipp Reisner 			return 0;
21083719094eSPhilipp Reisner 		break;
21093719094eSPhilipp Reisner 
21103719094eSPhilipp Reisner 		/* no new io accepted in these states */
2111b411b363SPhilipp Reisner 	case C_WF_BITMAP_T:
2112b411b363SPhilipp Reisner 	case C_WF_SYNC_UUID:
2113b411b363SPhilipp Reisner 	case C_MASK:
2114b411b363SPhilipp Reisner 		/* not "stable" */
2115b411b363SPhilipp Reisner 		return 0;
2116b411b363SPhilipp Reisner 	}
2117b411b363SPhilipp Reisner 
2118b411b363SPhilipp Reisner 	switch ((enum drbd_disk_state)s.disk) {
2119b411b363SPhilipp Reisner 	case D_DISKLESS:
2120b411b363SPhilipp Reisner 	case D_INCONSISTENT:
2121b411b363SPhilipp Reisner 	case D_OUTDATED:
2122b411b363SPhilipp Reisner 	case D_CONSISTENT:
2123b411b363SPhilipp Reisner 	case D_UP_TO_DATE:
21245ca1de03SPhilipp Reisner 	case D_FAILED:
2125b411b363SPhilipp Reisner 		/* disk state is stable as well. */
2126b411b363SPhilipp Reisner 		break;
2127b411b363SPhilipp Reisner 
2128d942ae44SPhilipp Reisner 	/* no new io accepted during transitional states */
2129b411b363SPhilipp Reisner 	case D_ATTACHING:
2130b411b363SPhilipp Reisner 	case D_NEGOTIATING:
2131b411b363SPhilipp Reisner 	case D_UNKNOWN:
2132b411b363SPhilipp Reisner 	case D_MASK:
2133b411b363SPhilipp Reisner 		/* not "stable" */
2134b411b363SPhilipp Reisner 		return 0;
2135b411b363SPhilipp Reisner 	}
2136b411b363SPhilipp Reisner 
2137b411b363SPhilipp Reisner 	return 1;
2138b411b363SPhilipp Reisner }
2139b411b363SPhilipp Reisner 
2140b30ab791SAndreas Gruenbacher static inline int drbd_suspended(struct drbd_device *device)
2141fb22c402SPhilipp Reisner {
21426bbf53caSAndreas Gruenbacher 	struct drbd_resource *resource = device->resource;
21438e0af25fSPhilipp Reisner 
21446bbf53caSAndreas Gruenbacher 	return resource->susp || resource->susp_fen || resource->susp_nod;
2145fb22c402SPhilipp Reisner }
2146fb22c402SPhilipp Reisner 
2147b30ab791SAndreas Gruenbacher static inline bool may_inc_ap_bio(struct drbd_device *device)
2148b411b363SPhilipp Reisner {
2149b30ab791SAndreas Gruenbacher 	int mxb = drbd_get_max_buffers(device);
2150b411b363SPhilipp Reisner 
2151b30ab791SAndreas Gruenbacher 	if (drbd_suspended(device))
21521b881ef7SAndreas Gruenbacher 		return false;
21537dbb4386SPhilipp Reisner 	if (atomic_read(&device->suspend_cnt))
21541b881ef7SAndreas Gruenbacher 		return false;
2155b411b363SPhilipp Reisner 
2156b411b363SPhilipp Reisner 	/* to avoid potential deadlock or bitmap corruption,
2157b411b363SPhilipp Reisner 	 * in various places, we only allow new application io
2158b411b363SPhilipp Reisner 	 * to start during "stable" states. */
2159b411b363SPhilipp Reisner 
2160b411b363SPhilipp Reisner 	/* no new io accepted when attaching or detaching the disk */
2161b30ab791SAndreas Gruenbacher 	if (!drbd_state_is_stable(device))
21621b881ef7SAndreas Gruenbacher 		return false;
2163b411b363SPhilipp Reisner 
2164b411b363SPhilipp Reisner 	/* since some older kernels don't have atomic_add_unless,
2165b411b363SPhilipp Reisner 	 * and we are within the spinlock anyway, we have this workaround.  */
2166b30ab791SAndreas Gruenbacher 	if (atomic_read(&device->ap_bio_cnt) > mxb)
21671b881ef7SAndreas Gruenbacher 		return false;
2168b30ab791SAndreas Gruenbacher 	if (test_bit(BITMAP_IO, &device->flags))
21691b881ef7SAndreas Gruenbacher 		return false;
21701b881ef7SAndreas Gruenbacher 	return true;
2171b411b363SPhilipp Reisner }
2172b411b363SPhilipp Reisner 
2173b30ab791SAndreas Gruenbacher static inline bool inc_ap_bio_cond(struct drbd_device *device)
21748869d683SPhilipp Reisner {
21751b881ef7SAndreas Gruenbacher 	bool rv = false;
21768869d683SPhilipp Reisner 
21770500813fSAndreas Gruenbacher 	spin_lock_irq(&device->resource->req_lock);
2178b30ab791SAndreas Gruenbacher 	rv = may_inc_ap_bio(device);
21798869d683SPhilipp Reisner 	if (rv)
2180b30ab791SAndreas Gruenbacher 		atomic_inc(&device->ap_bio_cnt);
21810500813fSAndreas Gruenbacher 	spin_unlock_irq(&device->resource->req_lock);
21828869d683SPhilipp Reisner 
21838869d683SPhilipp Reisner 	return rv;
21848869d683SPhilipp Reisner }
21858869d683SPhilipp Reisner 
2186b30ab791SAndreas Gruenbacher static inline void inc_ap_bio(struct drbd_device *device)
2187b411b363SPhilipp Reisner {
2188b411b363SPhilipp Reisner 	/* we wait here
2189b411b363SPhilipp Reisner 	 *    as long as the device is suspended,
2190b411b363SPhilipp Reisner 	 *    while the bitmap is still in flight during the connection
2191d942ae44SPhilipp Reisner 	 *    handshake, and as long as we would exceed the max_buffers limit.
2192b411b363SPhilipp Reisner 	 *
2193b411b363SPhilipp Reisner 	 * to avoid races with the reconnect code,
2194b411b363SPhilipp Reisner 	 * we need to atomic_inc within the spinlock. */
2195b411b363SPhilipp Reisner 
2196b30ab791SAndreas Gruenbacher 	wait_event(device->misc_wait, inc_ap_bio_cond(device));
2197b411b363SPhilipp Reisner }
2198b411b363SPhilipp Reisner 
2199b30ab791SAndreas Gruenbacher static inline void dec_ap_bio(struct drbd_device *device)
2200b411b363SPhilipp Reisner {
2201b30ab791SAndreas Gruenbacher 	int mxb = drbd_get_max_buffers(device);
2202b30ab791SAndreas Gruenbacher 	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
2203b411b363SPhilipp Reisner 
22040b0ba1efSAndreas Gruenbacher 	D_ASSERT(device, ap_bio >= 0);
22057ee1fb93SLars Ellenberg 
2206b30ab791SAndreas Gruenbacher 	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
2207b30ab791SAndreas Gruenbacher 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
220884b8c06bSAndreas Gruenbacher 			drbd_queue_work(&first_peer_device(device)->
220984b8c06bSAndreas Gruenbacher 				connection->sender_work,
221084b8c06bSAndreas Gruenbacher 				&device->bm_io_work.w);
22117ee1fb93SLars Ellenberg 	}
22127ee1fb93SLars Ellenberg 
2213b411b363SPhilipp Reisner 	/* this currently does wake_up for every dec_ap_bio!
2214b411b363SPhilipp Reisner 	 * maybe rather introduce some type of hysteresis?
2215b411b363SPhilipp Reisner 	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
2216b411b363SPhilipp Reisner 	if (ap_bio < mxb)
2217b30ab791SAndreas Gruenbacher 		wake_up(&device->misc_wait);
2218b411b363SPhilipp Reisner }
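
/*
 * Hedged pairing sketch (illustration only): application IO brackets its
 * lifetime with these counters, so submission throttles on max_buffers,
 * suspend state, and pending bitmap IO.
 *
 *	inc_ap_bio(device);	// may sleep until new IO is allowed
 *	...			// submit and track the request
 *	dec_ap_bio(device);	// on completion
 */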
2219b411b363SPhilipp Reisner 
2220b30ab791SAndreas Gruenbacher static inline bool verify_can_do_stop_sector(struct drbd_device *device)
222158ffa580SLars Ellenberg {
2222a6b32bc3SAndreas Gruenbacher 	return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
2223a6b32bc3SAndreas Gruenbacher 		first_peer_device(device)->connection->agreed_pro_version != 100;
222458ffa580SLars Ellenberg }
222558ffa580SLars Ellenberg 
2226b30ab791SAndreas Gruenbacher static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
2227b411b363SPhilipp Reisner {
2228b30ab791SAndreas Gruenbacher 	int changed = device->ed_uuid != val;
2229b30ab791SAndreas Gruenbacher 	device->ed_uuid = val;
223062b0da3aSLars Ellenberg 	return changed;
2231b411b363SPhilipp Reisner }
2232b411b363SPhilipp Reisner 
2233b30ab791SAndreas Gruenbacher static inline int drbd_queue_order_type(struct drbd_device *device)
2234b411b363SPhilipp Reisner {
2235b411b363SPhilipp Reisner 	/* sorry, we currently have no working implementation
2236b411b363SPhilipp Reisner 	 * of distributed TCQ stuff */
2237b411b363SPhilipp Reisner #ifndef QUEUE_ORDERED_NONE
2238b411b363SPhilipp Reisner #define QUEUE_ORDERED_NONE 0
2239b411b363SPhilipp Reisner #endif
2240b411b363SPhilipp Reisner 	return QUEUE_ORDERED_NONE;
2241b411b363SPhilipp Reisner }
2242b411b363SPhilipp Reisner 
224377c556f6SAndreas Gruenbacher static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
224477c556f6SAndreas Gruenbacher {
2245ec4a3407SLars Ellenberg 	return list_first_entry_or_null(&resource->connections,
224677c556f6SAndreas Gruenbacher 				struct drbd_connection, connections);
224777c556f6SAndreas Gruenbacher }
224877c556f6SAndreas Gruenbacher 
2249b411b363SPhilipp Reisner #endif
2250