/*
  drbd_int.h

  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

  drbd is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2, or (at your option)
  any later version.

  drbd is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with drbd; see the file COPYING.  If not, write to
  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
# define __must_hold(x)       __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
# define __must_hold(x)
#endif

#define __no_warn(lock, stmt) do { __acquire(lock); stmt; __release(lock); } while (0)

/* module parameters, defined in drbd_main.c */
extern unsigned int minor_count;
extern bool disable_sendpage;
extern bool allow_oos;
void tl_abort_disk_io(struct drbd_device *device);

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
extern int fault_rate;
extern int fault_devs;
#endif

extern char usermode_helper[];


/* I don't remember why XCPU ...
 * This is used to wake the asender,
 * and to interrupt the sending task
 * on disconnect.
 */
#define DRBD_SIG SIGXCPU

/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes.
 * I choose SIGHUP for now.
 */
#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;

#define __drbd_printk_device(level, device, fmt, args...) \
	dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
	dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
#define __drbd_printk_resource(level, resource, fmt, args...) \
	printk(level "drbd %s: " fmt, (resource)->name, ## args)
#define __drbd_printk_connection(level, connection, fmt, args...) \
	printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)

void drbd_printk_with_wrong_object_type(void);

#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
	(__builtin_types_compatible_p(typeof(obj), type) || \
	 __builtin_types_compatible_p(typeof(obj), const type)), \
	func(level, (const type)(obj), fmt, ## args)

#define drbd_printk(level, obj, fmt, args...) \
	__builtin_choose_expr( \
	  __drbd_printk_if_same_type(obj, struct drbd_device *, \
			     __drbd_printk_device, level, fmt, ## args), \
	  __builtin_choose_expr( \
	    __drbd_printk_if_same_type(obj, struct drbd_resource *, \
			       __drbd_printk_resource, level, fmt, ## args), \
	    __builtin_choose_expr( \
	      __drbd_printk_if_same_type(obj, struct drbd_connection *, \
				 __drbd_printk_connection, level, fmt, ## args), \
	      __builtin_choose_expr( \
		__drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
				 __drbd_printk_peer_device, level, fmt, ## args), \
		drbd_printk_with_wrong_object_type()))))

#define drbd_dbg(obj, fmt, args...) \
	drbd_printk(KERN_DEBUG, obj, fmt, ## args)
#define drbd_alert(obj, fmt, args...) \
	drbd_printk(KERN_ALERT, obj, fmt, ## args)
#define drbd_err(obj, fmt, args...) \
	drbd_printk(KERN_ERR, obj, fmt, ## args)
#define drbd_warn(obj, fmt, args...) \
	drbd_printk(KERN_WARNING, obj, fmt, ## args)
#define drbd_info(obj, fmt, args...) \
	drbd_printk(KERN_INFO, obj, fmt, ## args)
#define drbd_emerg(obj, fmt, args...) \
	drbd_printk(KERN_EMERG, obj, fmt, ## args)

#define dynamic_drbd_dbg(device, fmt, args...) \
	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)
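/* Usage sketch (call sites illustrative, not from this header): the same
 * drbd_* logging macro accepts any of the four object types and dispatches
 * to the matching printk helper at compile time, e.g.
 *
 *	drbd_warn(device, "local disk flush failed\n");
 *	drbd_info(connection, "Connection established\n");
 *
 * Passing any other pointer type should fail at link time, since
 * drbd_printk_with_wrong_object_type() is declared but intentionally
 * left without a definition.
 */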

#define D_ASSERT(device, exp)	do { \
	if (!(exp)) \
		drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
	} while (0)

/**
 * expect  -  Make an assertion
 *
 * Unlike the assert macro, this macro returns a boolean result.
 */
#define expect(exp) ({ \
		bool _bool = (exp); \
		if (!_bool) \
			drbd_err(device, "ASSERTION %s FAILED in %s\n", \
				#exp, __func__); \
		_bool; \
	})
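/* Usage sketch (illustrative): D_ASSERT() only logs, while expect() also
 * yields the tested value, so a caller can log and bail out in one step.
 * Note that expect() relies on a variable named "device" being in scope
 * at the call site:
 *
 *	if (!expect(req->rq_state & RQ_LOCAL_PENDING))
 *		return;
 */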

/* Defines to control fault insertion */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/*           read  */
	DRBD_FAULT_RS_WR = 2,	/* resync          */
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,	/* data            */
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
	DRBD_FAULT_RECEIVE = 9,	/* Changes some bytes upon receiving a [rs]data block */

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return fault_rate &&
		(enable_faults & (1<<type)) &&
		_drbd_insert_fault(device, type);
#else
	return 0;
#endif
}
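/* Usage sketch (assumed call site, modeled on the submission paths in
 * drbd_actlog.c): a caller guards real I/O with the fault type matching
 * that I/O, and fails the bio instead of submitting it:
 *
 *	if (drbd_insert_fault(device, DRBD_FAULT_MD_WR))
 *		bio_io_error(bio);
 *	else
 *		submit_bio(rw, bio);
 */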

/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))

extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices;		/* RCU, updates: genl_lock() */
extern struct list_head drbd_resources;	/* RCU, updates: genl_lock() */

extern const char *cmdname(enum drbd_packet cmd);

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
	/* "const"
	 * stores total bits and long words
	 * of the bitmap, so we don't need to
	 * call the accessor functions over and again. */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* during xfer, current position within the bitmap */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index: (h->command == P_BITMAP) */
	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_device *device,
		const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" (32 or 64 bit),
	 * aligned at 64 bit.
	 * Encoded packet may end at an unaligned bit offset.
	 * In case a fallback clear text packet is transmitted in
	 * between, we adjust this offset back to the last 64bit
	 * aligned "native long word", which makes coding and decoding
	 * the plain text bitmap much more convenient. */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
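/* Worked example: for bit_offset == 100, a 64 bit host computes
 * word_offset = 100 >> 6 = 1, while a 32 bit host computes
 * 100 >> 5 = 3 and rounds down to the even word 2.  Both therefore
 * point at the same 64 bit aligned boundary (byte offset 8).
 */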

extern unsigned int drbd_header_size(struct drbd_connection *connection);

/**********************************************************************/
enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	int reset_cpu_mask;
	const char *name;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 *	--lge */

	smp_rmb();
	return thi->t_state;
}

struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
};

struct drbd_device_work {
	struct drbd_work w;
	struct drbd_device *device;
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

struct drbd_request {
	struct drbd_work w;
	struct drbd_device *device;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error).
	 * see drbd_request_endio(). */
	struct bio *private_bio;

	struct drbd_interval i;

	/* epoch: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * causing a p_barrier packet to be sent, starting a new epoch.
	 *
	 * This corresponds to "barrier" in struct p_barrier[_ack],
	 * and to "barrier_nr" in struct drbd_epoch (and various
	 * comments/function parameters/local variable names).
	 */
	unsigned int epoch;

	struct list_head tl_requests; /* ring list in the transfer log */
	struct bio *master_bio;       /* master bio pointer */
	unsigned long start_time;

	/* once it hits 0, we may complete the master_bio */
	atomic_t completion_ref;
	/* once it hits 0, we may destroy this drbd_request object */
	struct kref kref;

	unsigned rq_state; /* see comments above _req_mod() */
};

struct drbd_epoch {
	struct drbd_connection *connection;
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size;	/* increased on every request added. */
	atomic_t active;	/* increased on every req. added, and dec on every finished. */
	unsigned long flags;
};

/* Prototype declarations of functions defined in drbd_receiver.c */
int drbdd_init(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

/* drbd_epoch flag bits */
enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32, /* used as flag */
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	struct drbd_epoch *epoch; /* for writes */
	struct page *pages;
	atomic_t pending_bios;
	struct drbd_interval i;
	/* see comments on ee flag bits below */
	unsigned long flags;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	/* is this a TRIM aka REQ_DISCARD? */
	__EE_IS_TRIM,
	/* our lower level cannot handle trim,
	 * and we want to fall back to zeroout instead */
	__EE_IS_TRIM_USE_ZEROOUT,

	/* In case a barrier failed,
	 * we need to resubmit without the barrier flag. */
	__EE_RESUBMITTED,

	/* we may have several bios per peer request.
	 * if any of those fail, we set this flag atomically
	 * from the endio callback */
	__EE_WAS_ERROR,

	/* This ee has a pointer to a digest instead of a block id */
	__EE_HAS_DIGEST,

	/* Conflicting local requests need to be restarted after this request */
	__EE_RESTART_REQUESTS,

	/* The peer wants a write ACK for this (wire proto C) */
	__EE_SEND_WRITE_ACK,

	/* Is set when net_conf had two_primaries set while creating this peer_req */
	__EE_IN_INTERVAL_TREE,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
#define EE_IS_TRIM             (1<<__EE_IS_TRIM)
#define EE_IS_TRIM_USE_ZEROOUT (1<<__EE_IS_TRIM_USE_ZEROOUT)
#define EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS    (1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK      (1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE    (1<<__EE_IN_INTERVAL_TREE)
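/* Sketch of the convention (call sites illustrative): the __EE_* enum
 * values are bit numbers for the atomic bitops while bios are in flight,
 * the EE_* masks are for plain tests once they are not:
 *
 *	set_bit(__EE_WAS_ERROR, &peer_req->flags);	// endio callback
 *	if (peer_req->flags & EE_WAS_ERROR) ...		// completion path
 */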

/* flag bits per device */
enum {
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	CONSIDER_RESYNC,

	MD_NO_FUA,		/* User wants us to not use FUA/FLUSH on meta data dev */

	SUSPEND_IO,		/* suspend application io */
	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,	/* Started bitmap IO */
	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
	WAS_READ_ERROR,		/* Local disk READ failed (set additionally to the above) */
	FORCE_DETACH,		/* Force-detach from local disk, aborting any pending local IO */
	RESYNC_AFTER_NEG,	/* Resync after online grow after the attach&negotiate finished. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
	AHEAD_TO_SYNC_SOURCE,	/* Ahead -> SyncSource queued */
	B_RS_H_DONE,		/* Before resync handler done (already executed) */
	DISCARD_MY_DATA,	/* discard_my_data flag per volume */
	READ_BALANCE_RR,

	/* cleared only after backing device related structures have been destroyed. */
	GOING_DISKLESS,		/* Disk is being detached, because of io-error, or admin request. */

	/* to be used in drbd_device_post_work() */
	GO_DISKLESS,		/* tell worker to schedule cleanup before detach */
	DESTROY_DISK,		/* tell worker to close backing devices and destroy related structures. */
	MD_SYNC,		/* tell worker to call drbd_md_sync() */
	RS_START,		/* tell worker to start resync/OV */
	RS_PROGRESS,		/* tell worker that resync made significant progress */
	RS_DONE,		/* tell worker that resync is done */
};

struct drbd_bitmap; /* opaque for drbd_device */

/* definition of bits in bm_flags to be used in drbd_bm_lock
 * and drbd_bitmap_io and friends. */
enum bm_flag {
	/* do we need to kfree, or vfree bm_pages? */
	BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */

	/* currently locked for bulk operation */
	BM_LOCKED_MASK = 0xf,

	/* in detail, that is: */
	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET   = 0x2,
	BM_DONT_TEST  = 0x4,

	/* so we can mark it locked for bulk operation,
	 * and still allow all non-bulk operations */
	BM_IS_LOCKED  = 0x8,

	/* (test bit, count bit) allowed (common case) */
	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	/* testing bits, as well as setting new bits allowed, but clearing bits
	 * would be unexpected.  Used during bitmap receive.  Setting new bits
	 * requires sending of "out-of-sync" information, though. */
	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	/* for drbd_bm_write_copy_pages, everything is allowed,
	 * only concurrent bulk operations are locked out. */
	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};
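/* Usage sketch (illustrative; the drbd_bm_lock()/drbd_bm_unlock() pair is
 * implemented in drbd_bitmap.c): a bulk operation takes the lock with the
 * set of modifications it wants to rule out, e.g. a scan that must only
 * read bits:
 *
 *	drbd_bm_lock(device, "some reason", BM_LOCKED_TEST_ALLOWED);
 *	... bulk work ...
 *	drbd_bm_unlock(device);
 */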

struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;	/* to protect the list. */
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	struct socket    *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	void *sbuf;
	void *rbuf;
};

struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	spinlock_t uuid_lock;
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;	/* signed relative sector offset to activity log */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* cached value of bdev->disk_conf->meta_dev_idx (see below) */
	s32 meta_dev_idx;

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k; /* cached product of the above */
};

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
	sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
	struct page *page;
	unsigned long start_jif;	/* last call to drbd_md_get_buffer */
	unsigned long submit_jif;	/* last _drbd_md_sync_page_io() submit */
	const char *current_use;
	atomic_t in_use;
	unsigned int done;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	enum bm_flag flags;
	int  (*io_fn)(struct drbd_device *device);
	void (*done)(struct drbd_device *device, int rv);
};

enum write_ordering_e {
	WO_none,
	WO_drain_io,
	WO_bdev_flush,
};

struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total; /* sum of all values */
	int values[0];
};
extern struct fifo_buffer *fifo_alloc(int fifo_size);
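/* Sketch of the expected allocation (fifo_alloc() itself lives in
 * drbd_nl.c; shown here only to document the flexible values[] member,
 * which must be allocated together with the header in one chunk):
 *
 *	fb = kzalloc(sizeof(struct fifo_buffer)
 *		     + fifo_size * sizeof(int), GFP_NOIO);
 *	fb->size = fifo_size;
 */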

/* flag bits per connection */
enum {
	NET_CONGESTED,		/* The data socket is congested */
	RESOLVE_CONFLICTS,	/* Set on one node, cleared on the peer! */
	SEND_PING,		/* whether asender should send a ping asap */
	SIGNAL_ASENDER,		/* whether asender wants to be interrupted */
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, ping_wait gets woken */
	CONN_WD_ST_CHG_REQ,	/* A cluster wide state change on the connection is active */
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	STATE_SENT,		/* Do not change state/UUIDs while this is set */
	CALLBACK_PENDING,	/* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
				 * pending, from drbd worker context.
				 * If set, bdi_write_congested() returns true,
				 * so shrink_page_list() would not recurse into,
				 * and potentially deadlock on, this drbd worker.
				 */
	DISCONNECT_SENT,

	DEVICE_WORK_PENDING,	/* tell worker that some device has pending work */
};

struct drbd_resource {
	char *name;
	struct kref kref;
	struct idr devices;		/* volume number to device mapping */
	struct list_head connections;
	struct list_head resources;
	struct res_opts res_opts;
	struct mutex conf_update;	/* mutex for read-copy-update of net_conf and disk_conf */
	struct mutex adm_mutex;		/* mutex to serialize administrative requests */
	spinlock_t req_lock;

	unsigned susp:1;		/* IO suspended by user */
	unsigned susp_nod:1;		/* IO suspended because no data */
	unsigned susp_fen:1;		/* IO suspended because fence peer handler runs */

	enum write_ordering_e write_ordering;

	cpumask_var_t cpu_mask;
};

struct drbd_connection {
	struct list_head connections;
	struct drbd_resource *resource;
	struct kref kref;
	struct idr peer_devices;	/* volume number to peer device mapping */
	enum drbd_conns cstate;		/* Only C_STANDALONE to C_WF_REPORT_PARAMS */
	struct mutex cstate_mutex;	/* Protects graceful disconnects */
	unsigned int connect_cnt;	/* Inc each time a connection is established */

	unsigned long flags;
	struct net_conf *net_conf;	/* content protected by rcu */
	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;	/* data/barrier/cstate/parameter packets */
	struct drbd_socket meta;	/* ping/ack (metadata) packets */
	int agreed_pro_version;		/* actually used protocol version */
	u32 agreed_features;
	unsigned long last_received;	/* in jiffies, either socket */
	unsigned int ko_count;

	struct list_head transfer_log;	/* all requests not yet fully processed */

	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_tfm;	/* checksums we compute, updates protected by connection->data->mutex */
	struct crypto_hash *peer_integrity_tfm;	/* checksums we verify, only accessed from receiver thread */
	struct crypto_hash *csums_tfm;
	struct crypto_hash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	/* receiver side */
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	atomic_t current_tle_nr;	/* transfer log epoch number */
	unsigned current_tle_writes;	/* writes seen within this tl epoch */

	unsigned long last_reconnect_jif;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread asender;

	/* sender side */
	struct drbd_work_queue sender_work;

	struct {
		/* whether this sender thread
		 * has processed a single write yet. */
		bool seen_any_write_yet;

		/* Which barrier number to send with the next P_BARRIER */
		int current_epoch_nr;

		/* how many write requests have been sent
		 * with req->epoch == current_epoch_nr.
		 * If none, no P_BARRIER will be sent. */
		unsigned current_epoch_writes;
	} send;
};
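/* Reference counting sketch (illustrative): resource, connection and
 * device objects are kref-counted; a holder takes and drops references
 * along the lines of
 *
 *	kref_get(&connection->kref);
 *	...
 *	kref_put(&connection->kref, drbd_destroy_connection);
 *
 * where drbd_destroy_connection() is assumed to be the release callback
 * implemented in drbd_main.c.
 */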

struct submit_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	spinlock_t lock;
	struct list_head writes;
};

struct drbd_peer_device {
	struct list_head peer_devices;
	struct drbd_device *device;
	struct drbd_connection *connection;
};

struct drbd_device {
	struct drbd_resource *resource;
	struct list_head peer_devices;
	int vnr;			/* volume number within the connection */
	struct kref kref;

	/* things that are stored as / read from meta data on disk */
	unsigned long flags;

	/* configured by drbdsetup */
	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;		/* partner's disk size */
	struct request_queue *rq_queue;
	struct block_device *this_bdev;
	struct gendisk	    *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work resync_work;
	struct drbd_work unplug_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;

	/* Used after attach while negotiating new disk state. */
	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;	/* upon each state change. */
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;		/* Requests we need to complete */
	atomic_t ap_pending_cnt;	/* AP data packets on the wire, ack expected */
	atomic_t rs_pending_cnt;	/* RS request/data packets on the wire */
	atomic_t unacked_cnt;		/* Need to send replies for */
	atomic_t local_cnt;		/* Waiting for local completion */

	/* Interval tree of pending local requests */
	struct rb_root read_requests;
	struct rb_root write_requests;

	/* use checksums for *this* resync */
	bool use_csums;
	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of resync blocks that failed in this run */
	unsigned long rs_failed;
	/* Syncer's start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in PausedSyncX state [unit jiffies] */
	unsigned long rs_paused;
	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
	/* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	/* mark's time [unit jiffies] */
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	/* current index into rs_mark_{left,time} */
	int rs_last_mark;
	unsigned long rs_last_bcast; /* [unit jiffies] */

	/* where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	sector_t ov_stop_sector;
	/* where are we now? (sector) */
	sector_t ov_position;
	/* Start sector of out of sync range (to merge printk reporting). */
	sector_t ov_last_oos_start;
	/* size of out-of-sync range in sectors. */
	sector_t ov_last_oos_size;
	unsigned long ov_left; /* in bits */

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

	/* Used to track operations of resync... */
	struct lru_cache *resync;
	/* Number of locked elements in resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;

	struct list_head active_ee;	/* IO in progress (P_DATA gets written to disk) */
	struct list_head sync_ee;	/* IO in progress (P_RS_DATA_REPLY gets written to disk) */
	struct list_head done_ee;	/* need to send P_WRITE_ACK */
	struct list_head read_ee;	/* [RS]P_DATA_REQUEST being read */
	struct list_head net_ee;	/* zero-copy network send in progress */

	int next_barrier_nr;
	struct list_head resync_reads;
	atomic_t pp_in_use;		/* allocated from page pool */
	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
	wait_queue_head_t ee_wait;
	struct drbd_md_io md_io;
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned int minor;
	unsigned long comm_bm_set; /* communicated number of set bits. */
	struct bm_io_work bm_io_work;
	u64 ed_uuid; /* UUID of the exposed data */
	struct mutex own_state_mutex;
	struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
	char congestion_reason;  /* Why we were congested... */
	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
	int rs_last_sect_ev; /* counter to compare with */
	int rs_last_events;  /* counter of read or write "events" (unit sectors)
			      * on the lower level device when we last looked. */
	int c_sync_rate; /* current resync rate after syncer throttle magic */
	struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, connection->conn_update) */
	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
	unsigned int peer_max_bio_size;
	unsigned int local_max_bio_size;

	/* any requests that would block in drbd_make_request()
	 * are deferred to this single-threaded work queue */
	struct submit_worker submit;
};

struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_device *device;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
};

static inline struct drbd_device *minor_to_device(unsigned int minor)
{
	return (struct drbd_device *)idr_find(&drbd_devices, minor);
}

static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
	return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
}

#define for_each_resource(resource, _resources) \
	list_for_each_entry(resource, _resources, resources)

#define for_each_resource_rcu(resource, _resources) \
	list_for_each_entry_rcu(resource, _resources, resources)

#define for_each_resource_safe(resource, tmp, _resources) \
	list_for_each_entry_safe(resource, tmp, _resources, resources)

#define for_each_connection(connection, resource) \
	list_for_each_entry(connection, &resource->connections, connections)

#define for_each_connection_rcu(connection, resource) \
	list_for_each_entry_rcu(connection, &resource->connections, connections)

#define for_each_connection_safe(connection, tmp, resource) \
	list_for_each_entry_safe(connection, tmp, &resource->connections, connections)

#define for_each_peer_device(peer_device, device) \
	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_rcu(peer_device, device) \
	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_safe(peer_device, tmp, device) \
	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
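/* Iteration sketch (illustrative): the _rcu variants require the RCU read
 * lock, the plain variants an appropriate mutex or the req_lock:
 *
 *	rcu_read_lock();
 *	for_each_resource_rcu(resource, &drbd_resources)
 *		for_each_connection_rcu(connection, resource)
 *			...;
 *	rcu_read_unlock();
 */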

static inline unsigned int device_to_minor(struct drbd_device *device)
{
	return device->minor;
}

/*
 * function declarations
 *************************/

/* drbd_main.c */

enum dds_flags {
	DDSF_FORCED    = 1,
	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};

extern void drbd_init_set_defaults(struct drbd_device *device);
extern int  drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
#else
#define drbd_thread_current_set_cpu(A) ({})
#endif
extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_connection *);
extern void drbd_free_sock(struct drbd_connection *connection);
extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
		     void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
			 unsigned);

extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_connection *connection);
extern int drbd_send_uuids(struct drbd_peer_device *);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
extern int drbd_send_current_state(struct drbd_peer_device *);
extern int drbd_send_sync_param(struct drbd_peer_device *);
extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
			    u32 set_size);
extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
			 struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
			    sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
			   struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
				   int size, void *digest, int digest_size,
				   enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern void drbd_free_ldev(struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
void drbd_print_uuids(struct drbd_device *device, const char *text);

extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
extern void drbd_md_sync(struct drbd_device *device);
extern int  drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
972b30ab791SAndreas Gruenbacher extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local); 973b30ab791SAndreas Gruenbacher extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local); 974b30ab791SAndreas Gruenbacher extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local); 975b30ab791SAndreas Gruenbacher extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local); 976b30ab791SAndreas Gruenbacher extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local); 977b30ab791SAndreas Gruenbacher extern void drbd_md_clear_flag(struct drbd_device *device, int flags)__must_hold(local); 978b411b363SPhilipp Reisner extern int drbd_md_test_flag(struct drbd_backing_dev *, int); 979b30ab791SAndreas Gruenbacher extern void drbd_md_mark_dirty(struct drbd_device *device); 980b30ab791SAndreas Gruenbacher extern void drbd_queue_bitmap_io(struct drbd_device *device, 98154761697SAndreas Gruenbacher int (*io_fn)(struct drbd_device *), 98254761697SAndreas Gruenbacher void (*done)(struct drbd_device *, int), 98320ceb2b2SLars Ellenberg char *why, enum bm_flag flags); 984b30ab791SAndreas Gruenbacher extern int drbd_bitmap_io(struct drbd_device *device, 98554761697SAndreas Gruenbacher int (*io_fn)(struct drbd_device *), 98620ceb2b2SLars Ellenberg char *why, enum bm_flag flags); 987b30ab791SAndreas Gruenbacher extern int drbd_bitmap_io_from_worker(struct drbd_device *device, 98854761697SAndreas Gruenbacher int (*io_fn)(struct drbd_device *), 989edc9f5ebSLars Ellenberg char *why, enum bm_flag flags); 9908fe39aacSPhilipp Reisner extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local); 9918fe39aacSPhilipp Reisner extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local); 992b411b363SPhilipp Reisner 993b411b363SPhilipp Reisner /* Meta data layout 994ae8bf312SLars Ellenberg * 995ae8bf312SLars Ellenberg * We currently have two possible layouts. 996ae8bf312SLars Ellenberg * Offsets in (512 byte) sectors. 997ae8bf312SLars Ellenberg * external: 998ae8bf312SLars Ellenberg * |----------- md_size_sect ------------------| 999ae8bf312SLars Ellenberg * [ 4k superblock ][ activity log ][ Bitmap ] 1000ae8bf312SLars Ellenberg * | al_offset == 8 | 1001ae8bf312SLars Ellenberg * | bm_offset = al_offset + X | 1002ae8bf312SLars Ellenberg * ==> bitmap sectors = md_size_sect - bm_offset 1003ae8bf312SLars Ellenberg * 1004ae8bf312SLars Ellenberg * Variants: 1005ae8bf312SLars Ellenberg * old, indexed fixed size meta data: 1006ae8bf312SLars Ellenberg * 1007ae8bf312SLars Ellenberg * internal: 1008ae8bf312SLars Ellenberg * |----------- md_size_sect ------------------| 1009ae8bf312SLars Ellenberg * [data.....][ Bitmap ][ activity log ][ 4k superblock ][padding*] 1010ae8bf312SLars Ellenberg * | al_offset < 0 | 1011ae8bf312SLars Ellenberg * | bm_offset = al_offset - Y | 1012ae8bf312SLars Ellenberg * ==> bitmap sectors = Y = al_offset - bm_offset 1013ae8bf312SLars Ellenberg * 1014ae8bf312SLars Ellenberg * [padding*] are zero or up to 7 unused 512 Byte sectors to the 1015ae8bf312SLars Ellenberg * end of the device, so that the [4k superblock] will be 4k aligned. 
1016ae8bf312SLars Ellenberg * 1017ae8bf312SLars Ellenberg * The activity log consists of 4k transaction blocks, 1018ae8bf312SLars Ellenberg * which are written in a ring-buffer, or striped ring-buffer like fashion, 1019ae8bf312SLars Ellenberg * and which size used to be fixed 32kB, 1020ae8bf312SLars Ellenberg * but is about to become configurable. 1021ae8bf312SLars Ellenberg */ 1022b411b363SPhilipp Reisner 1023ae8bf312SLars Ellenberg /* Our old fixed size meta data layout 1024ae8bf312SLars Ellenberg * allows up to about 3.8TB, so if you want more, 10257ad651b5SLars Ellenberg * you need to use the "flexible" meta data format. */ 1026ae8bf312SLars Ellenberg #define MD_128MB_SECT (128LLU << 11) /* 128 MB, unit sectors */ 1027ae8bf312SLars Ellenberg #define MD_4kB_SECT 8 1028ae8bf312SLars Ellenberg #define MD_32kB_SECT 64 1029b411b363SPhilipp Reisner 10307ad651b5SLars Ellenberg /* One activity log extent represents 4M of storage */ 10317ad651b5SLars Ellenberg #define AL_EXTENT_SHIFT 22 1032b411b363SPhilipp Reisner #define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT) 10347ad651b5SLars Ellenberg /* We could make these currently hardcoded constants configurable 10357ad651b5SLars Ellenberg * variables at create-md time (or even re-configurable at runtime?). 10367ad651b5SLars Ellenberg * Which will require some more changes to the DRBD "super block" 10377ad651b5SLars Ellenberg * and attach code. 10387ad651b5SLars Ellenberg * 10397ad651b5SLars Ellenberg * updates per transaction: 10407ad651b5SLars Ellenberg * This many changes to the active set can be logged with one transaction. 10417ad651b5SLars Ellenberg * This number is arbitrary. 10427ad651b5SLars Ellenberg * context per transaction: 10437ad651b5SLars Ellenberg * This many context extent numbers are logged with each transaction. 10447ad651b5SLars Ellenberg * This number results from the transaction block size (4k), the layout 10457ad651b5SLars Ellenberg * of the transaction header, and the number of updates per transaction. 10467ad651b5SLars Ellenberg * See drbd_actlog.c:struct al_transaction_on_disk 10477ad651b5SLars Ellenberg */ 10487ad651b5SLars Ellenberg #define AL_UPDATES_PER_TRANSACTION 64 /* arbitrary */ 10497ad651b5SLars Ellenberg #define AL_CONTEXT_PER_TRANSACTION 919 /* (4096 - 36 - 6*64)/4 */ 10507ad651b5SLars Ellenberg 1051b411b363SPhilipp Reisner #if BITS_PER_LONG == 32 1052b411b363SPhilipp Reisner #define LN2_BPL 5 1053b411b363SPhilipp Reisner #define cpu_to_lel(A) cpu_to_le32(A) 1054b411b363SPhilipp Reisner #define lel_to_cpu(A) le32_to_cpu(A) 1055b411b363SPhilipp Reisner #elif BITS_PER_LONG == 64 1056b411b363SPhilipp Reisner #define LN2_BPL 6 1057b411b363SPhilipp Reisner #define cpu_to_lel(A) cpu_to_le64(A) 1058b411b363SPhilipp Reisner #define lel_to_cpu(A) le64_to_cpu(A) 1059b411b363SPhilipp Reisner #else 1060b411b363SPhilipp Reisner #error "LN2 of BITS_PER_LONG unknown!" 1061b411b363SPhilipp Reisner #endif 1062b411b363SPhilipp Reisner 1063b411b363SPhilipp Reisner /* resync bitmap */ 1064b411b363SPhilipp Reisner /* 16MB sized 'bitmap extent' to track syncer usage */ 1065b411b363SPhilipp Reisner struct bm_extent { 1066b411b363SPhilipp Reisner int rs_left; /* number of bits set (out of sync) in this extent. */ 1067b411b363SPhilipp Reisner int rs_failed; /* number of failed resync requests in this extent.
*/ 1068b411b363SPhilipp Reisner unsigned long flags; 1069b411b363SPhilipp Reisner struct lc_element lce; 1070b411b363SPhilipp Reisner }; 1071b411b363SPhilipp Reisner 1072b411b363SPhilipp Reisner #define BME_NO_WRITES 0 /* bm_extent.flags: no more requests on this one! */ 1073b411b363SPhilipp Reisner #define BME_LOCKED 1 /* bm_extent.flags: syncer active on this one. */ 1074e3555d85SPhilipp Reisner #define BME_PRIORITY 2 /* finish resync IO on this extent ASAP! App IO waiting! */ 1075b411b363SPhilipp Reisner 1076b411b363SPhilipp Reisner /* drbd_bitmap.c */ 1077b411b363SPhilipp Reisner /* 1078b411b363SPhilipp Reisner * We need to store one bit for a block. 1079b411b363SPhilipp Reisner * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap. 1080b411b363SPhilipp Reisner * Bit 0 ==> local node thinks this block is binary identical on both nodes 1081b411b363SPhilipp Reisner * Bit 1 ==> local node thinks this block needs to be synced. 1082b411b363SPhilipp Reisner */ 1083b411b363SPhilipp Reisner 10848e26f9ccSPhilipp Reisner #define SLEEP_TIME (HZ/10) 10858e26f9ccSPhilipp Reisner 108645dfffebSLars Ellenberg /* We do bitmap IO in units of 4k blocks. 108745dfffebSLars Ellenberg * We also still have a hardcoded 4k per bit relation. */ 1088b411b363SPhilipp Reisner #define BM_BLOCK_SHIFT 12 /* 4k per bit */ 1089b411b363SPhilipp Reisner #define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT) 109045dfffebSLars Ellenberg /* mostly arbitrarily set the represented size of one bitmap extent, 109145dfffebSLars Ellenberg * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap 109245dfffebSLars Ellenberg * at 4k per bit resolution) */ 109345dfffebSLars Ellenberg #define BM_EXT_SHIFT 24 /* 16 MiB per resync extent */ 1094b411b363SPhilipp Reisner #define BM_EXT_SIZE (1<<BM_EXT_SHIFT) 1095b411b363SPhilipp Reisner 1096b411b363SPhilipp Reisner #if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12) 1097b411b363SPhilipp Reisner #error "HAVE YOU FIXED drbdmeta AS WELL??" 1098b411b363SPhilipp Reisner #endif 1099b411b363SPhilipp Reisner 1100b411b363SPhilipp Reisner /* thus many _storage_ sectors are described by one bit */ 1101b411b363SPhilipp Reisner #define BM_SECT_TO_BIT(x) ((x)>>(BM_BLOCK_SHIFT-9)) 1102b411b363SPhilipp Reisner #define BM_BIT_TO_SECT(x) ((sector_t)(x)<<(BM_BLOCK_SHIFT-9)) 1103b411b363SPhilipp Reisner #define BM_SECT_PER_BIT BM_BIT_TO_SECT(1) 1104b411b363SPhilipp Reisner 1105b411b363SPhilipp Reisner /* bit to represented kilo byte conversion */ 1106b411b363SPhilipp Reisner #define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10)) 1107b411b363SPhilipp Reisner 1108b411b363SPhilipp Reisner /* in which _bitmap_ extent (resp. 
sector) the bit for a certain 1109b411b363SPhilipp Reisner * _storage_ sector is located */ 1110b411b363SPhilipp Reisner #define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9)) 11115ab7d2c0SLars Ellenberg #define BM_BIT_TO_EXT(x) ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT)) 1112b411b363SPhilipp Reisner 11135ab7d2c0SLars Ellenberg /* first storage sector a bitmap extent corresponds to */ 1114b411b363SPhilipp Reisner #define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9)) 11155ab7d2c0SLars Ellenberg /* how many _storage_ sectors we have per bitmap extent */ 1116b411b363SPhilipp Reisner #define BM_SECT_PER_EXT BM_EXT_TO_SECT(1) 11175ab7d2c0SLars Ellenberg /* how many bits are covered by one bitmap extent (resync extent) */ 11185ab7d2c0SLars Ellenberg #define BM_BITS_PER_EXT (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT)) 11195ab7d2c0SLars Ellenberg 11205ab7d2c0SLars Ellenberg #define BM_BLOCKS_PER_BM_EXT_MASK (BM_BITS_PER_EXT - 1) 11215ab7d2c0SLars Ellenberg 1122b411b363SPhilipp Reisner 1123b411b363SPhilipp Reisner /* in one sector of the bitmap, we have this many activity_log extents. */ 1124b411b363SPhilipp Reisner #define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT)) 1125b411b363SPhilipp Reisner 1126b411b363SPhilipp Reisner /* the extent in "PER_EXTENT" below is an activity log extent 1127b411b363SPhilipp Reisner * we need that many (long words/bytes) to store the bitmap 1128b411b363SPhilipp Reisner * of one AL_EXTENT_SIZE chunk of storage. 1129b411b363SPhilipp Reisner * we can store the bitmap for that many AL_EXTENTS within 1130b411b363SPhilipp Reisner * one sector of the _on_disk_ bitmap: 1131b411b363SPhilipp Reisner * bit 0 bit 37 bit 38 bit (512*8)-1 1132b411b363SPhilipp Reisner * ...|........|........|.. // ..|........| 1133b411b363SPhilipp Reisner * sect. 0 `296 `304 ^(512*8*8)-1 1134b411b363SPhilipp Reisner * 1135b411b363SPhilipp Reisner #define BM_WORDS_PER_EXT ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG ) 1136b411b363SPhilipp Reisner #define BM_BYTES_PER_EXT ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 ) // 128 1137b411b363SPhilipp Reisner #define BM_EXT_PER_SECT ( 512 / BM_BYTES_PER_EXT ) // 4 1138b411b363SPhilipp Reisner */ 1139b411b363SPhilipp Reisner 1140b411b363SPhilipp Reisner #define DRBD_MAX_SECTORS_32 (0xffffffffLU) 1141ae8bf312SLars Ellenberg /* we have a certain meta data variant that has a fixed on-disk size of 128 1142ae8bf312SLars Ellenberg * MiB, of which 4k are our "superblock", and 32k are the fixed size activity 1143ae8bf312SLars Ellenberg * log, leaving this many sectors for the bitmap. 1144ae8bf312SLars Ellenberg */ 1145ae8bf312SLars Ellenberg 1146ae8bf312SLars Ellenberg #define DRBD_MAX_SECTORS_FIXED_BM \ 1147ae8bf312SLars Ellenberg ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9))) 1148ae8bf312SLars Ellenberg #if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32 1149b411b363SPhilipp Reisner #define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32 1150b411b363SPhilipp Reisner #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32 1151b411b363SPhilipp Reisner #else 1152ae8bf312SLars Ellenberg #define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM 1153b411b363SPhilipp Reisner /* 16 TB in units of sectors */ 1154b411b363SPhilipp Reisner #if BITS_PER_LONG == 32 1155b411b363SPhilipp Reisner /* adjust by one page worth of bitmap, 1156b411b363SPhilipp Reisner * so we won't wrap around in drbd_bm_find_next_bit. 1157b411b363SPhilipp Reisner * you should use a 64bit OS for that much storage, anyway.
*/ 1158b411b363SPhilipp Reisner #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff) 1159b411b363SPhilipp Reisner #else 11604b0715f0SLars Ellenberg /* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */ 11614b0715f0SLars Ellenberg #define DRBD_MAX_SECTORS_FLEX (1UL << 51) 11624b0715f0SLars Ellenberg /* corresponds to (1UL << 38) bits right now. */ 1163b411b363SPhilipp Reisner #endif 1164b411b363SPhilipp Reisner #endif 1165b411b363SPhilipp Reisner 116623361cf3SLars Ellenberg /* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE, 116723361cf3SLars Ellenberg * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte. 116823361cf3SLars Ellenberg * Since we may live in a mixed-platform cluster, 116923361cf3SLars Ellenberg * we limit ourselves to a platform-agnostic constant here for now. 117023361cf3SLars Ellenberg * A followup commit may allow even bigger BIO sizes, 117123361cf3SLars Ellenberg * once we have thought that through. */ 117298683650SPhilipp Reisner #define DRBD_MAX_BIO_SIZE (1U << 20) 117323361cf3SLars Ellenberg #if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE 117423361cf3SLars Ellenberg #error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE 117523361cf3SLars Ellenberg #endif 1176db141b2fSLars Ellenberg #define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* always works = 4k */ 1177b411b363SPhilipp Reisner 117898683650SPhilipp Reisner #define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */ 117998683650SPhilipp Reisner #define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */ 1180b411b363SPhilipp Reisner 1181a0fb3c47SLars Ellenberg /* For now, don't allow more than one activity log extent worth of data 1182a0fb3c47SLars Ellenberg * to be discarded in one go. We may need to rework drbd_al_begin_io() 1183a0fb3c47SLars Ellenberg * to allow for even larger discard ranges */ 1184a0fb3c47SLars Ellenberg #define DRBD_MAX_DISCARD_SIZE AL_EXTENT_SIZE 1185a0fb3c47SLars Ellenberg #define DRBD_MAX_DISCARD_SECTORS (DRBD_MAX_DISCARD_SIZE >> 9) 1186a0fb3c47SLars Ellenberg 1187b30ab791SAndreas Gruenbacher extern int drbd_bm_init(struct drbd_device *device); 1188b30ab791SAndreas Gruenbacher extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits); 1189b30ab791SAndreas Gruenbacher extern void drbd_bm_cleanup(struct drbd_device *device); 1190b30ab791SAndreas Gruenbacher extern void drbd_bm_set_all(struct drbd_device *device); 1191b30ab791SAndreas Gruenbacher extern void drbd_bm_clear_all(struct drbd_device *device); 11924b0715f0SLars Ellenberg /* set/clear/test only a few bits at a time */ 1193b411b363SPhilipp Reisner extern int drbd_bm_set_bits( 1194b30ab791SAndreas Gruenbacher struct drbd_device *device, unsigned long s, unsigned long e); 1195b411b363SPhilipp Reisner extern int drbd_bm_clear_bits( 1196b30ab791SAndreas Gruenbacher struct drbd_device *device, unsigned long s, unsigned long e); 11974b0715f0SLars Ellenberg extern int drbd_bm_count_bits( 1198b30ab791SAndreas Gruenbacher struct drbd_device *device, const unsigned long s, const unsigned long e); 11994b0715f0SLars Ellenberg /* bm_set_bits variant for use while holding drbd_bm_lock, 12004b0715f0SLars Ellenberg * may process the whole bitmap in one go */ 1201b30ab791SAndreas Gruenbacher extern void _drbd_bm_set_bits(struct drbd_device *device, 1202b411b363SPhilipp Reisner const unsigned long s, const unsigned long e); 1203b30ab791SAndreas Gruenbacher extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
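/*
 * Editor's note: an illustrative sketch, not part of the original header.
 * It shows how the 4k-per-bit convention combines with BM_SECT_TO_BIT()
 * (defined above) and drbd_bm_set_bits(): the first and last 512-byte
 * sector touched by a request are mapped to their covering bitmap bits,
 * inclusive.  Assumes size is a positive byte count that is a multiple of
 * 512; the helper name example_set_bits_for_range() is hypothetical.
 */
static inline int example_set_bits_for_range(struct drbd_device *device,
					     sector_t sector, int size)
{
	unsigned long sbnr = BM_SECT_TO_BIT(sector);			/* rounds down */
	unsigned long ebnr = BM_SECT_TO_BIT(sector + (size >> 9) - 1);	/* bit of last sector */

	return drbd_bm_set_bits(device, sbnr, ebnr);
}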
1204b30ab791SAndreas Gruenbacher extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr); 1205b30ab791SAndreas Gruenbacher extern int drbd_bm_read(struct drbd_device *device) __must_hold(local); 1206b30ab791SAndreas Gruenbacher extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr); 1207b30ab791SAndreas Gruenbacher extern int drbd_bm_write(struct drbd_device *device) __must_hold(local); 1208b30ab791SAndreas Gruenbacher extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local); 1209c7a58db4SLars Ellenberg extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local); 1210b30ab791SAndreas Gruenbacher extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local); 1211b30ab791SAndreas Gruenbacher extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local); 1212b30ab791SAndreas Gruenbacher extern size_t drbd_bm_words(struct drbd_device *device); 1213b30ab791SAndreas Gruenbacher extern unsigned long drbd_bm_bits(struct drbd_device *device); 1214b30ab791SAndreas Gruenbacher extern sector_t drbd_bm_capacity(struct drbd_device *device); 12154b0715f0SLars Ellenberg 12164b0715f0SLars Ellenberg #define DRBD_END_OF_BITMAP (~(unsigned long)0) 1217b30ab791SAndreas Gruenbacher extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo); 1218b411b363SPhilipp Reisner /* bm_find_next variants for use while you hold drbd_bm_lock() */ 1219b30ab791SAndreas Gruenbacher extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo); 1220b30ab791SAndreas Gruenbacher extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo); 1221b30ab791SAndreas Gruenbacher extern unsigned long _drbd_bm_total_weight(struct drbd_device *device); 1222b30ab791SAndreas Gruenbacher extern unsigned long drbd_bm_total_weight(struct drbd_device *device); 1223b411b363SPhilipp Reisner /* for receive_bitmap */ 1224b30ab791SAndreas Gruenbacher extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, 1225b411b363SPhilipp Reisner size_t number, unsigned long *buffer); 122619f843aaSLars Ellenberg /* for _drbd_send_bitmap */ 1227b30ab791SAndreas Gruenbacher extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset, 1228b411b363SPhilipp Reisner size_t number, unsigned long *buffer); 1229b411b363SPhilipp Reisner 1230b30ab791SAndreas Gruenbacher extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags); 1231b30ab791SAndreas Gruenbacher extern void drbd_bm_unlock(struct drbd_device *device); 1232b411b363SPhilipp Reisner /* drbd_main.c */ 1233b411b363SPhilipp Reisner 1234b411b363SPhilipp Reisner extern struct kmem_cache *drbd_request_cache; 12356c852becSAndreas Gruenbacher extern struct kmem_cache *drbd_ee_cache; /* peer requests */ 1236b411b363SPhilipp Reisner extern struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */ 1237b411b363SPhilipp Reisner extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */ 1238b411b363SPhilipp Reisner extern mempool_t *drbd_request_mempool; 1239b411b363SPhilipp Reisner extern mempool_t *drbd_ee_mempool; 1240b411b363SPhilipp Reisner 12414281808fSLars Ellenberg /* drbd's page pool, used to buffer data received from the peer, 12424281808fSLars Ellenberg * or data requested by the peer. 12434281808fSLars Ellenberg * 12444281808fSLars Ellenberg * This does not have an emergency reserve. 
12454281808fSLars Ellenberg * 12464281808fSLars Ellenberg * When allocating from this pool, it first takes pages from the pool. 12474281808fSLars Ellenberg * Only if the pool is depleted will it try to allocate from the system. 12484281808fSLars Ellenberg * 12494281808fSLars Ellenberg * The assumption is that pages taken from this pool will be processed, 12504281808fSLars Ellenberg * and given back, "quickly", and then can be recycled, so we can avoid 12514281808fSLars Ellenberg * frequent calls to alloc_page(), and still will be able to make progress even 12524281808fSLars Ellenberg * under memory pressure. 12534281808fSLars Ellenberg */ 12544281808fSLars Ellenberg extern struct page *drbd_pp_pool; 1255b411b363SPhilipp Reisner extern spinlock_t drbd_pp_lock; 1256b411b363SPhilipp Reisner extern int drbd_pp_vacant; 1257b411b363SPhilipp Reisner extern wait_queue_head_t drbd_pp_wait; 1258b411b363SPhilipp Reisner 12594281808fSLars Ellenberg /* We also need a standard (emergency-reserve backed) page pool 12604281808fSLars Ellenberg * for meta data IO (activity log, bitmap). 12614281808fSLars Ellenberg * We can keep it global, as long as it is used as "N pages at a time". 12624281808fSLars Ellenberg * 128 should be plenty, currently we probably can get away with as few as 1. 12634281808fSLars Ellenberg */ 12644281808fSLars Ellenberg #define DRBD_MIN_POOL_PAGES 128 12654281808fSLars Ellenberg extern mempool_t *drbd_md_io_page_pool; 12664281808fSLars Ellenberg 12679476f39dSLars Ellenberg /* We also need to make sure we get a bio 12689476f39dSLars Ellenberg * when we need it for housekeeping purposes */ 12699476f39dSLars Ellenberg extern struct bio_set *drbd_md_io_bio_set; 12709476f39dSLars Ellenberg /* to allocate from that set */ 12719476f39dSLars Ellenberg extern struct bio *bio_alloc_drbd(gfp_t gfp_mask); 12729476f39dSLars Ellenberg 1273b411b363SPhilipp Reisner extern rwlock_t global_state_lock; 1274b411b363SPhilipp Reisner 1275bde89a9eSAndreas Gruenbacher extern int conn_lowest_minor(struct drbd_connection *connection); 1276a910b123SLars Ellenberg extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor); 127705a10ec7SAndreas Gruenbacher extern void drbd_destroy_device(struct kref *kref); 1278a910b123SLars Ellenberg extern void drbd_delete_device(struct drbd_device *device); 1279b411b363SPhilipp Reisner 128077c556f6SAndreas Gruenbacher extern struct drbd_resource *drbd_create_resource(const char *name); 128177c556f6SAndreas Gruenbacher extern void drbd_free_resource(struct drbd_resource *resource); 128277c556f6SAndreas Gruenbacher 1283eb6bea67SAndreas Gruenbacher extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts); 1284bde89a9eSAndreas Gruenbacher extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts); 128505a10ec7SAndreas Gruenbacher extern void drbd_destroy_connection(struct kref *kref); 1286bde89a9eSAndreas Gruenbacher extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len, 1287089c075dSAndreas Gruenbacher void *peer_addr, int peer_addr_len); 12884bc76048SAndreas Gruenbacher extern struct drbd_resource *drbd_find_resource(const char *name); 128977c556f6SAndreas Gruenbacher extern void drbd_destroy_resource(struct kref *kref); 1290bde89a9eSAndreas Gruenbacher extern void conn_free_crypto(struct drbd_connection *connection); 1291b411b363SPhilipp Reisner 1292b411b363SPhilipp Reisner extern int proc_details; 1293b411b363SPhilipp Reisner 1294b411b363SPhilipp
Reisner /* drbd_req */ 1295113fef9eSLars Ellenberg extern void do_submit(struct work_struct *ws); 129654761697SAndreas Gruenbacher extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long); 12975a7bbad2SChristoph Hellwig extern void drbd_make_request(struct request_queue *q, struct bio *bio); 1298b30ab791SAndreas Gruenbacher extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req); 1299b411b363SPhilipp Reisner extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec); 1300b411b363SPhilipp Reisner extern int is_valid_ar_handle(struct drbd_request *, sector_t); 1301b411b363SPhilipp Reisner 1302b411b363SPhilipp Reisner 1303b411b363SPhilipp Reisner /* drbd_nl.c */ 1304a910b123SLars Ellenberg extern int drbd_msg_put_info(struct sk_buff *skb, const char *info); 1305b30ab791SAndreas Gruenbacher extern void drbd_suspend_io(struct drbd_device *device); 1306b30ab791SAndreas Gruenbacher extern void drbd_resume_io(struct drbd_device *device); 1307b411b363SPhilipp Reisner extern char *ppsize(char *buf, unsigned long long size); 130854761697SAndreas Gruenbacher extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int); 1309e96c9633SPhilipp Reisner enum determine_dev_size { 1310d752b269SPhilipp Reisner DS_ERROR_SHRINK = -3, 1311d752b269SPhilipp Reisner DS_ERROR_SPACE_MD = -2, 1312e96c9633SPhilipp Reisner DS_ERROR = -1, 1313e96c9633SPhilipp Reisner DS_UNCHANGED = 0, 1314e96c9633SPhilipp Reisner DS_SHRUNK = 1, 131557737adcSPhilipp Reisner DS_GREW = 2, 131657737adcSPhilipp Reisner DS_GREW_FROM_ZERO = 3, 1317e96c9633SPhilipp Reisner }; 1318d752b269SPhilipp Reisner extern enum determine_dev_size 131954761697SAndreas Gruenbacher drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local); 132054761697SAndreas Gruenbacher extern void resync_after_online_grow(struct drbd_device *); 13218fe39aacSPhilipp Reisner extern void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev); 1322b30ab791SAndreas Gruenbacher extern enum drbd_state_rv drbd_set_role(struct drbd_device *device, 1323bf885f8aSAndreas Gruenbacher enum drbd_role new_role, 1324b411b363SPhilipp Reisner int force); 1325bde89a9eSAndreas Gruenbacher extern bool conn_try_outdate_peer(struct drbd_connection *connection); 1326bde89a9eSAndreas Gruenbacher extern void conn_try_outdate_peer_async(struct drbd_connection *connection); 1327b30ab791SAndreas Gruenbacher extern int drbd_khelper(struct drbd_device *device, char *cmd); 1328b411b363SPhilipp Reisner 1329b411b363SPhilipp Reisner /* drbd_worker.c */ 1330d40e5671SPhilipp Reisner /* bi_end_io handlers */ 1331d40e5671SPhilipp Reisner extern void drbd_md_io_complete(struct bio *bio, int error); 1332d40e5671SPhilipp Reisner extern void drbd_peer_request_endio(struct bio *bio, int error); 1333d40e5671SPhilipp Reisner extern void drbd_request_endio(struct bio *bio, int error); 1334b411b363SPhilipp Reisner extern int drbd_worker(struct drbd_thread *thi); 1335b30ab791SAndreas Gruenbacher enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor); 1336b30ab791SAndreas Gruenbacher void drbd_resync_after_changed(struct drbd_device *device); 1337b30ab791SAndreas Gruenbacher extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side); 1338b30ab791SAndreas Gruenbacher extern void resume_next_sg(struct drbd_device *device); 1339b30ab791SAndreas Gruenbacher extern void 
suspend_other_sg(struct drbd_device *device); 1340b30ab791SAndreas Gruenbacher extern int drbd_resync_finished(struct drbd_device *device); 1341b411b363SPhilipp Reisner /* maybe rather drbd_main.c ? */ 1342*e37d2438SLars Ellenberg extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent); 1343b30ab791SAndreas Gruenbacher extern void drbd_md_put_buffer(struct drbd_device *device); 1344b30ab791SAndreas Gruenbacher extern int drbd_md_sync_page_io(struct drbd_device *device, 1345b411b363SPhilipp Reisner struct drbd_backing_dev *bdev, sector_t sector, int rw); 134654761697SAndreas Gruenbacher extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int); 1347b30ab791SAndreas Gruenbacher extern void wait_until_done_or_force_detached(struct drbd_device *device, 134844edfb0dSLars Ellenberg struct drbd_backing_dev *bdev, unsigned int *done); 1349b30ab791SAndreas Gruenbacher extern void drbd_rs_controller_reset(struct drbd_device *device); 1350b411b363SPhilipp Reisner 1351b30ab791SAndreas Gruenbacher static inline void ov_out_of_sync_print(struct drbd_device *device) 1352b411b363SPhilipp Reisner { 1353b30ab791SAndreas Gruenbacher if (device->ov_last_oos_size) { 1354d0180171SAndreas Gruenbacher drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n", 1355b30ab791SAndreas Gruenbacher (unsigned long long)device->ov_last_oos_start, 1356b30ab791SAndreas Gruenbacher (unsigned long)device->ov_last_oos_size); 1357b411b363SPhilipp Reisner } 1358b30ab791SAndreas Gruenbacher device->ov_last_oos_size = 0; 1359b411b363SPhilipp Reisner } 1360b411b363SPhilipp Reisner 1361b411b363SPhilipp Reisner 136279a3c8d3SAndreas Gruenbacher extern void drbd_csum_bio(struct crypto_hash *, struct bio *, void *); 136379a3c8d3SAndreas Gruenbacher extern void drbd_csum_ee(struct crypto_hash *, struct drbd_peer_request *, void *); 1364b411b363SPhilipp Reisner /* worker callbacks */ 136599920dc5SAndreas Gruenbacher extern int w_e_end_data_req(struct drbd_work *, int); 136699920dc5SAndreas Gruenbacher extern int w_e_end_rsdata_req(struct drbd_work *, int); 136799920dc5SAndreas Gruenbacher extern int w_e_end_csum_rs_req(struct drbd_work *, int); 136899920dc5SAndreas Gruenbacher extern int w_e_end_ov_reply(struct drbd_work *, int); 136999920dc5SAndreas Gruenbacher extern int w_e_end_ov_req(struct drbd_work *, int); 137099920dc5SAndreas Gruenbacher extern int w_ov_finished(struct drbd_work *, int); 137199920dc5SAndreas Gruenbacher extern int w_resync_timer(struct drbd_work *, int); 137299920dc5SAndreas Gruenbacher extern int w_send_write_hint(struct drbd_work *, int); 137399920dc5SAndreas Gruenbacher extern int w_send_dblock(struct drbd_work *, int); 137499920dc5SAndreas Gruenbacher extern int w_send_read_req(struct drbd_work *, int); 137599920dc5SAndreas Gruenbacher extern int w_e_reissue(struct drbd_work *, int); 137699920dc5SAndreas Gruenbacher extern int w_restart_disk_io(struct drbd_work *, int); 13778f7bed77SAndreas Gruenbacher extern int w_send_out_of_sync(struct drbd_work *, int); 137899920dc5SAndreas Gruenbacher extern int w_start_resync(struct drbd_work *, int); 1379b411b363SPhilipp Reisner 1380b411b363SPhilipp Reisner extern void resync_timer_fn(unsigned long data); 1381370a43e7SPhilipp Reisner extern void start_resync_timer_fn(unsigned long data); 1382b411b363SPhilipp Reisner 1383a0fb3c47SLars Ellenberg extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req); 1384a0fb3c47SLars Ellenberg 1385b411b363SPhilipp Reisner /* drbd_receiver.c */ 1386753c6191SAndreas 
Gruenbacher extern int drbd_receiver(struct drbd_thread *thi); 1387753c6191SAndreas Gruenbacher extern int drbd_asender(struct drbd_thread *thi); 1388e8299874SLars Ellenberg extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device); 1389e8299874SLars Ellenberg extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector); 139054761697SAndreas Gruenbacher extern int drbd_submit_peer_request(struct drbd_device *, 1391fbe29decSAndreas Gruenbacher struct drbd_peer_request *, const unsigned, 1392fbe29decSAndreas Gruenbacher const int); 139354761697SAndreas Gruenbacher extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *); 139469a22773SAndreas Gruenbacher extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64, 13950db55363SAndreas Gruenbacher sector_t, unsigned int, 1396a0fb3c47SLars Ellenberg bool, 1397f6ffca9fSAndreas Gruenbacher gfp_t) __must_hold(local); 139854761697SAndreas Gruenbacher extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *, 1399f6ffca9fSAndreas Gruenbacher int); 14003967deb1SAndreas Gruenbacher #define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0) 14013967deb1SAndreas Gruenbacher #define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1) 140269a22773SAndreas Gruenbacher extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool); 1403b30ab791SAndreas Gruenbacher extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled); 1404b30ab791SAndreas Gruenbacher extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed); 140569a22773SAndreas Gruenbacher extern int drbd_connected(struct drbd_peer_device *); 1406b411b363SPhilipp Reisner 1407ed439848SLars Ellenberg /* Yes, there is kernel_setsockopt, but only since 2.6.18. 1408ed439848SLars Ellenberg * So we have our own copy of it here. 
*/ 1409b411b363SPhilipp Reisner static inline int drbd_setsockopt(struct socket *sock, int level, int optname, 1410ed439848SLars Ellenberg char *optval, int optlen) 1411b411b363SPhilipp Reisner { 1412ed439848SLars Ellenberg mm_segment_t oldfs = get_fs(); 1413ed439848SLars Ellenberg char __user *uoptval; 1414b411b363SPhilipp Reisner int err; 1415ed439848SLars Ellenberg 1416ed439848SLars Ellenberg uoptval = (char __user __force *)optval; 1417ed439848SLars Ellenberg 1418ed439848SLars Ellenberg set_fs(KERNEL_DS); 1419b411b363SPhilipp Reisner if (level == SOL_SOCKET) 1420ed439848SLars Ellenberg err = sock_setsockopt(sock, level, optname, uoptval, optlen); 1421b411b363SPhilipp Reisner else 1422ed439848SLars Ellenberg err = sock->ops->setsockopt(sock, level, optname, uoptval, 1423b411b363SPhilipp Reisner optlen); 1424ed439848SLars Ellenberg set_fs(oldfs); 1425b411b363SPhilipp Reisner return err; 1426b411b363SPhilipp Reisner } 1427b411b363SPhilipp Reisner 1428b411b363SPhilipp Reisner static inline void drbd_tcp_cork(struct socket *sock) 1429b411b363SPhilipp Reisner { 1430ed439848SLars Ellenberg int val = 1; 1431b411b363SPhilipp Reisner (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK, 1432ed439848SLars Ellenberg (char*)&val, sizeof(val)); 1433b411b363SPhilipp Reisner } 1434b411b363SPhilipp Reisner 1435b411b363SPhilipp Reisner static inline void drbd_tcp_uncork(struct socket *sock) 1436b411b363SPhilipp Reisner { 1437ed439848SLars Ellenberg int val = 0; 1438b411b363SPhilipp Reisner (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK, 1439ed439848SLars Ellenberg (char*)&val, sizeof(val)); 1440b411b363SPhilipp Reisner } 1441b411b363SPhilipp Reisner 1442b411b363SPhilipp Reisner static inline void drbd_tcp_nodelay(struct socket *sock) 1443b411b363SPhilipp Reisner { 1444ed439848SLars Ellenberg int val = 1; 1445b411b363SPhilipp Reisner (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY, 1446ed439848SLars Ellenberg (char*)&val, sizeof(val)); 1447b411b363SPhilipp Reisner } 1448b411b363SPhilipp Reisner 1449b411b363SPhilipp Reisner static inline void drbd_tcp_quickack(struct socket *sock) 1450b411b363SPhilipp Reisner { 1451ed439848SLars Ellenberg int val = 2; 1452b411b363SPhilipp Reisner (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK, 1453ed439848SLars Ellenberg (char*)&val, sizeof(val)); 1454b411b363SPhilipp Reisner } 1455b411b363SPhilipp Reisner 1456d40e5671SPhilipp Reisner /* sets the number of 512 byte sectors of our virtual device */ 1457d40e5671SPhilipp Reisner static inline void drbd_set_my_capacity(struct drbd_device *device, 1458d40e5671SPhilipp Reisner sector_t size) 1459d40e5671SPhilipp Reisner { 1460d40e5671SPhilipp Reisner /* set_capacity(device->this_bdev->bd_disk, size); */ 1461d40e5671SPhilipp Reisner set_capacity(device->vdisk, size); 1462d40e5671SPhilipp Reisner device->this_bdev->bd_inode->i_size = (loff_t)size << 9; 1463d40e5671SPhilipp Reisner } 1464d40e5671SPhilipp Reisner 1465d40e5671SPhilipp Reisner /* 1466d40e5671SPhilipp Reisner * used to submit our private bio 1467d40e5671SPhilipp Reisner */ 1468d40e5671SPhilipp Reisner static inline void drbd_generic_make_request(struct drbd_device *device, 1469d40e5671SPhilipp Reisner int fault_type, struct bio *bio) 1470d40e5671SPhilipp Reisner { 1471d40e5671SPhilipp Reisner __release(local); 1472d40e5671SPhilipp Reisner if (!bio->bi_bdev) { 1473f88c5d90SLars Ellenberg drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n"); 1474d40e5671SPhilipp Reisner bio_endio(bio, -ENODEV); 1475d40e5671SPhilipp Reisner return; 1476d40e5671SPhilipp 
Reisner } 1477d40e5671SPhilipp Reisner 1478d40e5671SPhilipp Reisner if (drbd_insert_fault(device, fault_type)) 1479d40e5671SPhilipp Reisner bio_endio(bio, -EIO); 1480d40e5671SPhilipp Reisner else 1481d40e5671SPhilipp Reisner generic_make_request(bio); 1482d40e5671SPhilipp Reisner } 1483d40e5671SPhilipp Reisner 14848fe39aacSPhilipp Reisner void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev, 14858fe39aacSPhilipp Reisner enum write_ordering_e wo); 1486b411b363SPhilipp Reisner 1487b411b363SPhilipp Reisner /* drbd_proc.c */ 1488b411b363SPhilipp Reisner extern struct proc_dir_entry *drbd_proc; 14897d4e9d09SEmese Revfy extern const struct file_operations drbd_proc_fops; 1490b411b363SPhilipp Reisner extern const char *drbd_conn_str(enum drbd_conns s); 1491b411b363SPhilipp Reisner extern const char *drbd_role_str(enum drbd_role s); 1492b411b363SPhilipp Reisner 1493b411b363SPhilipp Reisner /* drbd_actlog.c */ 1494e4d7d6f4SLars Ellenberg extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i); 1495b30ab791SAndreas Gruenbacher extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i); 14964dd726f0SLars Ellenberg extern void drbd_al_begin_io_commit(struct drbd_device *device); 1497b30ab791SAndreas Gruenbacher extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i); 14984dd726f0SLars Ellenberg extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i); 1499b30ab791SAndreas Gruenbacher extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i); 1500b30ab791SAndreas Gruenbacher extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector); 1501b30ab791SAndreas Gruenbacher extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector); 1502b30ab791SAndreas Gruenbacher extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector); 1503b30ab791SAndreas Gruenbacher extern void drbd_rs_cancel_all(struct drbd_device *device); 1504b30ab791SAndreas Gruenbacher extern int drbd_rs_del_all(struct drbd_device *device); 1505b30ab791SAndreas Gruenbacher extern void drbd_rs_failed_io(struct drbd_device *device, 1506b411b363SPhilipp Reisner sector_t sector, int size); 1507b30ab791SAndreas Gruenbacher extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go); 15085ab7d2c0SLars Ellenberg 15095ab7d2c0SLars Ellenberg enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC }; 15105ab7d2c0SLars Ellenberg extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size, 15115ab7d2c0SLars Ellenberg enum update_sync_bits_mode mode, 15125ab7d2c0SLars Ellenberg const char *file, const unsigned int line); 1513b30ab791SAndreas Gruenbacher #define drbd_set_in_sync(device, sector, size) \ 15145ab7d2c0SLars Ellenberg __drbd_change_sync(device, sector, size, SET_IN_SYNC, __FILE__, __LINE__) 1515b30ab791SAndreas Gruenbacher #define drbd_set_out_of_sync(device, sector, size) \ 15165ab7d2c0SLars Ellenberg __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC, __FILE__, __LINE__) 15175ab7d2c0SLars Ellenberg #define drbd_rs_failed_io(device, sector, size) \ 15185ab7d2c0SLars Ellenberg __drbd_change_sync(device, sector, size, RECORD_RS_FAILED, __FILE__, __LINE__) 1519b30ab791SAndreas Gruenbacher extern void drbd_al_shrink(struct drbd_device *device); 152054761697SAndreas Gruenbacher extern int drbd_initialize_al(struct drbd_device *, void *); 
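/*
 * Editor's note: an illustrative sketch, not part of the original header.
 * The drbd_set_in_sync()/drbd_set_out_of_sync()/drbd_rs_failed_io() macros
 * above all expand to __drbd_change_sync() and pass the caller's
 * __FILE__/__LINE__, so bitmap accounting problems are reported at the
 * call site.  The helper name example_resync_write_done() is hypothetical.
 */
static inline void example_resync_write_done(struct drbd_device *device,
					     sector_t sector, int size,
					     int error)
{
	if (error)
		drbd_rs_failed_io(device, sector, size);	/* record failed resync IO */
	else
		drbd_set_in_sync(device, sector, size);		/* clear the out-of-sync bits */
}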
1521b411b363SPhilipp Reisner 1522b411b363SPhilipp Reisner /* drbd_nl.c */ 15233b98c0c2SLars Ellenberg /* state info broadcast */ 15243b98c0c2SLars Ellenberg struct sib_info { 15253b98c0c2SLars Ellenberg enum drbd_state_info_bcast_reason sib_reason; 15263b98c0c2SLars Ellenberg union { 15273b98c0c2SLars Ellenberg struct { 15283b98c0c2SLars Ellenberg char *helper_name; 15293b98c0c2SLars Ellenberg unsigned helper_exit_code; 15303b98c0c2SLars Ellenberg }; 15313b98c0c2SLars Ellenberg struct { 15323b98c0c2SLars Ellenberg union drbd_state os; 15333b98c0c2SLars Ellenberg union drbd_state ns; 15343b98c0c2SLars Ellenberg }; 15353b98c0c2SLars Ellenberg }; 15363b98c0c2SLars Ellenberg }; 1537b30ab791SAndreas Gruenbacher void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib); 1538b411b363SPhilipp Reisner 1539b411b363SPhilipp Reisner /* 1540b411b363SPhilipp Reisner * inline helper functions 1541b411b363SPhilipp Reisner *************************/ 1542b411b363SPhilipp Reisner 154345bb912bSLars Ellenberg /* see also page_chain_add and friends in drbd_receiver.c */ 154445bb912bSLars Ellenberg static inline struct page *page_chain_next(struct page *page) 154545bb912bSLars Ellenberg { 154645bb912bSLars Ellenberg return (struct page *)page_private(page); 154745bb912bSLars Ellenberg } 154845bb912bSLars Ellenberg #define page_chain_for_each(page) \ 154945bb912bSLars Ellenberg for (; page && ({ prefetch(page_chain_next(page)); 1; }); \ 155045bb912bSLars Ellenberg page = page_chain_next(page)) 155145bb912bSLars Ellenberg #define page_chain_for_each_safe(page, n) \ 155245bb912bSLars Ellenberg for (; page && ({ n = page_chain_next(page); 1; }); page = n) 155345bb912bSLars Ellenberg 155445bb912bSLars Ellenberg 1555045417f7SAndreas Gruenbacher static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req) 155645bb912bSLars Ellenberg { 1557db830c46SAndreas Gruenbacher struct page *page = peer_req->pages; 155845bb912bSLars Ellenberg page_chain_for_each(page) { 155945bb912bSLars Ellenberg if (page_count(page) > 1) 156045bb912bSLars Ellenberg return 1; 156145bb912bSLars Ellenberg } 156245bb912bSLars Ellenberg return 0; 156345bb912bSLars Ellenberg } 156445bb912bSLars Ellenberg 1565bf885f8aSAndreas Gruenbacher static inline enum drbd_state_rv 1566b30ab791SAndreas Gruenbacher _drbd_set_state(struct drbd_device *device, union drbd_state ns, 1567bf885f8aSAndreas Gruenbacher enum chg_state_flags flags, struct completion *done) 1568b411b363SPhilipp Reisner { 1569bf885f8aSAndreas Gruenbacher enum drbd_state_rv rv; 1570b411b363SPhilipp Reisner 1571b411b363SPhilipp Reisner read_lock(&global_state_lock); 1572b30ab791SAndreas Gruenbacher rv = __drbd_set_state(device, ns, flags, done); 1573b411b363SPhilipp Reisner read_unlock(&global_state_lock); 1574b411b363SPhilipp Reisner 1575b411b363SPhilipp Reisner return rv; 1576b411b363SPhilipp Reisner } 1577b411b363SPhilipp Reisner 1578b30ab791SAndreas Gruenbacher static inline union drbd_state drbd_read_state(struct drbd_device *device) 1579b411b363SPhilipp Reisner { 15806bbf53caSAndreas Gruenbacher struct drbd_resource *resource = device->resource; 158178bae59bSPhilipp Reisner union drbd_state rv; 158278bae59bSPhilipp Reisner 1583b30ab791SAndreas Gruenbacher rv.i = device->state.i; 15846bbf53caSAndreas Gruenbacher rv.susp = resource->susp; 15856bbf53caSAndreas Gruenbacher rv.susp_nod = resource->susp_nod; 15866bbf53caSAndreas Gruenbacher rv.susp_fen = resource->susp_fen; 158778bae59bSPhilipp Reisner 158878bae59bSPhilipp Reisner return rv; 
1589b411b363SPhilipp Reisner } 1590b411b363SPhilipp Reisner 1591383606e0SLars Ellenberg enum drbd_force_detach_flags { 1592a2a3c74fSLars Ellenberg DRBD_READ_ERROR, 1593a2a3c74fSLars Ellenberg DRBD_WRITE_ERROR, 1594383606e0SLars Ellenberg DRBD_META_IO_ERROR, 1595383606e0SLars Ellenberg DRBD_FORCE_DETACH, 1596383606e0SLars Ellenberg }; 1597383606e0SLars Ellenberg 1598b411b363SPhilipp Reisner #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__) 1599b30ab791SAndreas Gruenbacher static inline void __drbd_chk_io_error_(struct drbd_device *device, 1600a2a3c74fSLars Ellenberg enum drbd_force_detach_flags df, 1601383606e0SLars Ellenberg const char *where) 1602b411b363SPhilipp Reisner { 1603daeda1ccSPhilipp Reisner enum drbd_io_error_p ep; 1604daeda1ccSPhilipp Reisner 1605daeda1ccSPhilipp Reisner rcu_read_lock(); 1606b30ab791SAndreas Gruenbacher ep = rcu_dereference(device->ldev->disk_conf)->on_io_error; 1607daeda1ccSPhilipp Reisner rcu_read_unlock(); 1608daeda1ccSPhilipp Reisner switch (ep) { 1609daeda1ccSPhilipp Reisner case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */ 1610a2a3c74fSLars Ellenberg if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) { 16117383506cSLars Ellenberg if (__ratelimit(&drbd_ratelimit_state)) 1612d0180171SAndreas Gruenbacher drbd_err(device, "Local IO failed in %s.\n", where); 1613b30ab791SAndreas Gruenbacher if (device->state.disk > D_INCONSISTENT) 1614b30ab791SAndreas Gruenbacher _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL); 1615b411b363SPhilipp Reisner break; 1616b411b363SPhilipp Reisner } 1617a2a3c74fSLars Ellenberg /* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */ 1618b411b363SPhilipp Reisner case EP_DETACH: 1619b411b363SPhilipp Reisner case EP_CALL_HELPER: 1620a2a3c74fSLars Ellenberg /* Remember whether we saw a READ or WRITE error. 1621a2a3c74fSLars Ellenberg * 1622a2a3c74fSLars Ellenberg * Recovery of the affected area for WRITE failure is covered 1623a2a3c74fSLars Ellenberg * by the activity log. 1624a2a3c74fSLars Ellenberg * READ errors may fall outside that area though. Certain READ 1625a2a3c74fSLars Ellenberg * errors can be "healed" by writing good data to the affected 1626a2a3c74fSLars Ellenberg * blocks, which triggers block re-allocation in lower layers. 1627a2a3c74fSLars Ellenberg * 1628a2a3c74fSLars Ellenberg * If we can not write the bitmap after a READ error, 1629a2a3c74fSLars Ellenberg * we may need to trigger a full sync (see w_go_diskless()). 1630a2a3c74fSLars Ellenberg * 1631a2a3c74fSLars Ellenberg * Force-detach is not really an IO error, but rather a 1632a2a3c74fSLars Ellenberg * desperate measure to try to deal with a completely 1633a2a3c74fSLars Ellenberg * unresponsive lower level IO stack. 1634a2a3c74fSLars Ellenberg * Still it should be treated as a WRITE error. 1635a2a3c74fSLars Ellenberg * 1636a2a3c74fSLars Ellenberg * Meta IO error is always WRITE error: 1637a2a3c74fSLars Ellenberg * we read meta data only once during attach, 1638a2a3c74fSLars Ellenberg * which will fail in case of errors. 
1639a2a3c74fSLars Ellenberg */ 1640b30ab791SAndreas Gruenbacher set_bit(WAS_IO_ERROR, &device->flags); 1641a2a3c74fSLars Ellenberg if (df == DRBD_READ_ERROR) 1642b30ab791SAndreas Gruenbacher set_bit(WAS_READ_ERROR, &device->flags); 1643a2a3c74fSLars Ellenberg if (df == DRBD_FORCE_DETACH) 1644b30ab791SAndreas Gruenbacher set_bit(FORCE_DETACH, &device->flags); 1645b30ab791SAndreas Gruenbacher if (device->state.disk > D_FAILED) { 1646b30ab791SAndreas Gruenbacher _drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL); 1647d0180171SAndreas Gruenbacher drbd_err(device, 164882f59cc6SLars Ellenberg "Local IO failed in %s. Detaching...\n", where); 1649b411b363SPhilipp Reisner } 1650b411b363SPhilipp Reisner break; 1651b411b363SPhilipp Reisner } 1652b411b363SPhilipp Reisner } 1653b411b363SPhilipp Reisner 1654b411b363SPhilipp Reisner /** 1655b411b363SPhilipp Reisner * drbd_chk_io_error: Handle the on_io_error setting, should be called from all io completion handlers 1656b30ab791SAndreas Gruenbacher * @device: DRBD device. 1657b411b363SPhilipp Reisner * @error: Error code passed to the IO completion callback 1658b411b363SPhilipp Reisner * @forcedetach: Force detach. I.e. the error happened while accessing the meta data 1659b411b363SPhilipp Reisner * 1660b411b363SPhilipp Reisner * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED) 1661b411b363SPhilipp Reisner */ 1662b411b363SPhilipp Reisner #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__) 1663b30ab791SAndreas Gruenbacher static inline void drbd_chk_io_error_(struct drbd_device *device, 1664383606e0SLars Ellenberg int error, enum drbd_force_detach_flags forcedetach, const char *where) 1665b411b363SPhilipp Reisner { 1666b411b363SPhilipp Reisner if (error) { 1667b411b363SPhilipp Reisner unsigned long flags; 16680500813fSAndreas Gruenbacher spin_lock_irqsave(&device->resource->req_lock, flags); 1669b30ab791SAndreas Gruenbacher __drbd_chk_io_error_(device, forcedetach, where); 16700500813fSAndreas Gruenbacher spin_unlock_irqrestore(&device->resource->req_lock, flags); 1671b411b363SPhilipp Reisner } 1672b411b363SPhilipp Reisner } 1673b411b363SPhilipp Reisner 1674b411b363SPhilipp Reisner 1675b411b363SPhilipp Reisner /** 1676b411b363SPhilipp Reisner * drbd_md_first_sector() - Returns the first sector number of the meta data area 1677b411b363SPhilipp Reisner * @bdev: Meta data block device. 1678b411b363SPhilipp Reisner * 1679b411b363SPhilipp Reisner * BTW, for internal meta data, this happens to be the maximum capacity 1680b411b363SPhilipp Reisner * we could agree upon with our peer node. 1681b411b363SPhilipp Reisner */ 168268e41a43SLars Ellenberg static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev) 1683b411b363SPhilipp Reisner { 168468e41a43SLars Ellenberg switch (bdev->md.meta_dev_idx) { 1685b411b363SPhilipp Reisner case DRBD_MD_INDEX_INTERNAL: 1686b411b363SPhilipp Reisner case DRBD_MD_INDEX_FLEX_INT: 1687b411b363SPhilipp Reisner return bdev->md.md_offset + bdev->md.bm_offset; 1688b411b363SPhilipp Reisner case DRBD_MD_INDEX_FLEX_EXT: 1689b411b363SPhilipp Reisner default: 1690b411b363SPhilipp Reisner return bdev->md.md_offset; 1691b411b363SPhilipp Reisner } 1692b411b363SPhilipp Reisner } 1693b411b363SPhilipp Reisner 1694b411b363SPhilipp Reisner /** 1695b411b363SPhilipp Reisner * drbd_md_last_sector() - Return the last sector number of the meta data area 1696b411b363SPhilipp Reisner * @bdev: Meta data block device. 
1697b411b363SPhilipp Reisner */ 1698b411b363SPhilipp Reisner static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev) 1699b411b363SPhilipp Reisner { 170068e41a43SLars Ellenberg switch (bdev->md.meta_dev_idx) { 1701b411b363SPhilipp Reisner case DRBD_MD_INDEX_INTERNAL: 1702b411b363SPhilipp Reisner case DRBD_MD_INDEX_FLEX_INT: 1703ae8bf312SLars Ellenberg return bdev->md.md_offset + MD_4kB_SECT -1; 1704b411b363SPhilipp Reisner case DRBD_MD_INDEX_FLEX_EXT: 1705b411b363SPhilipp Reisner default: 1706ae8bf312SLars Ellenberg return bdev->md.md_offset + bdev->md.md_size_sect -1; 1707b411b363SPhilipp Reisner } 1708b411b363SPhilipp Reisner } 1709b411b363SPhilipp Reisner 1710b411b363SPhilipp Reisner /* Returns the number of 512 byte sectors of the device */ 1711b411b363SPhilipp Reisner static inline sector_t drbd_get_capacity(struct block_device *bdev) 1712b411b363SPhilipp Reisner { 1713b411b363SPhilipp Reisner /* return bdev ? get_capacity(bdev->bd_disk) : 0; */ 171477304d2aSMike Snitzer return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0; 1715b411b363SPhilipp Reisner } 1716b411b363SPhilipp Reisner 1717b411b363SPhilipp Reisner /** 1718b411b363SPhilipp Reisner * drbd_get_max_capacity() - Returns the capacity we announce to our peer 1719b411b363SPhilipp Reisner * @bdev: Meta data block device. 1720b411b363SPhilipp Reisner * 1721b411b363SPhilipp Reisner * Returns the capacity we announce to our peer. We clip ourselves at the 1722b411b363SPhilipp Reisner * various MAX_SECTORS, because if we don't, the current implementation will 1723b411b363SPhilipp Reisner * oops sooner or later. 1724b411b363SPhilipp Reisner */ 1725b411b363SPhilipp Reisner static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev) 1726b411b363SPhilipp Reisner { 1727b411b363SPhilipp Reisner sector_t s; 1728daeda1ccSPhilipp Reisner 172968e41a43SLars Ellenberg switch (bdev->md.meta_dev_idx) { 1730b411b363SPhilipp Reisner case DRBD_MD_INDEX_INTERNAL: 1731b411b363SPhilipp Reisner case DRBD_MD_INDEX_FLEX_INT: 1732b411b363SPhilipp Reisner s = drbd_get_capacity(bdev->backing_bdev) 1733b411b363SPhilipp Reisner ? min_t(sector_t, DRBD_MAX_SECTORS_FLEX, 173468e41a43SLars Ellenberg drbd_md_first_sector(bdev)) 1735b411b363SPhilipp Reisner : 0; 1736b411b363SPhilipp Reisner break; 1737b411b363SPhilipp Reisner case DRBD_MD_INDEX_FLEX_EXT: 1738b411b363SPhilipp Reisner s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX, 1739b411b363SPhilipp Reisner drbd_get_capacity(bdev->backing_bdev)); 1740b411b363SPhilipp Reisner /* clip at maximum size the meta device can support */ 1741b411b363SPhilipp Reisner s = min_t(sector_t, s, 1742b411b363SPhilipp Reisner BM_EXT_TO_SECT(bdev->md.md_size_sect 1743b411b363SPhilipp Reisner - bdev->md.bm_offset)); 1744b411b363SPhilipp Reisner break; 1745b411b363SPhilipp Reisner default: 1746b411b363SPhilipp Reisner s = min_t(sector_t, DRBD_MAX_SECTORS, 1747b411b363SPhilipp Reisner drbd_get_capacity(bdev->backing_bdev)); 1748b411b363SPhilipp Reisner } 1749b411b363SPhilipp Reisner return s; 1750b411b363SPhilipp Reisner } 1751b411b363SPhilipp Reisner 1752b411b363SPhilipp Reisner /** 17533a4d4eb3SLars Ellenberg * drbd_md_ss() - Return the sector number of our meta data super block 1754b411b363SPhilipp Reisner * @bdev: Meta data block device.
1755b411b363SPhilipp Reisner */ 17563a4d4eb3SLars Ellenberg static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev) 1757b411b363SPhilipp Reisner { 17583a4d4eb3SLars Ellenberg const int meta_dev_idx = bdev->md.meta_dev_idx; 1759daeda1ccSPhilipp Reisner 17603a4d4eb3SLars Ellenberg if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT) 1761b411b363SPhilipp Reisner return 0; 17623a4d4eb3SLars Ellenberg 17633a4d4eb3SLars Ellenberg /* Since drbd08, internal meta data is always "flexible". 1764ae8bf312SLars Ellenberg * position: last 4k aligned block of 4k size */ 17653a4d4eb3SLars Ellenberg if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL || 17663a4d4eb3SLars Ellenberg meta_dev_idx == DRBD_MD_INDEX_FLEX_INT) 1767ae8bf312SLars Ellenberg return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8; 17683a4d4eb3SLars Ellenberg 17693a4d4eb3SLars Ellenberg /* external, some index; this is the old fixed size layout */ 17703a4d4eb3SLars Ellenberg return MD_128MB_SECT * bdev->md.meta_dev_idx; 1771b411b363SPhilipp Reisner } 1772b411b363SPhilipp Reisner 1773b411b363SPhilipp Reisner static inline void 1774b411b363SPhilipp Reisner drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w) 1775b411b363SPhilipp Reisner { 1776b411b363SPhilipp Reisner unsigned long flags; 1777b411b363SPhilipp Reisner spin_lock_irqsave(&q->q_lock, flags); 1778b411b363SPhilipp Reisner list_add_tail(&w->list, &q->q); 1779b411b363SPhilipp Reisner spin_unlock_irqrestore(&q->q_lock, flags); 17808c0785a5SLars Ellenberg wake_up(&q->q_wait); 1781b411b363SPhilipp Reisner } 1782b411b363SPhilipp Reisner 1783e334f550SLars Ellenberg static inline void 178415e26f6aSLars Ellenberg drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w) 178515e26f6aSLars Ellenberg { 178615e26f6aSLars Ellenberg unsigned long flags; 178715e26f6aSLars Ellenberg spin_lock_irqsave(&q->q_lock, flags); 178815e26f6aSLars Ellenberg if (list_empty_careful(&w->list)) 178915e26f6aSLars Ellenberg list_add_tail(&w->list, &q->q); 179015e26f6aSLars Ellenberg spin_unlock_irqrestore(&q->q_lock, flags); 179115e26f6aSLars Ellenberg wake_up(&q->q_wait); 179215e26f6aSLars Ellenberg } 179315e26f6aSLars Ellenberg 179415e26f6aSLars Ellenberg static inline void 1795e334f550SLars Ellenberg drbd_device_post_work(struct drbd_device *device, int work_bit) 1796e334f550SLars Ellenberg { 1797e334f550SLars Ellenberg if (!test_and_set_bit(work_bit, &device->flags)) { 1798e334f550SLars Ellenberg struct drbd_connection *connection = 1799e334f550SLars Ellenberg first_peer_device(device)->connection; 1800e334f550SLars Ellenberg struct drbd_work_queue *q = &connection->sender_work; 1801e334f550SLars Ellenberg if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags)) 1802e334f550SLars Ellenberg wake_up(&q->q_wait); 1803e334f550SLars Ellenberg } 1804e334f550SLars Ellenberg } 1805e334f550SLars Ellenberg 1806b5043c5eSAndreas Gruenbacher extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue); 1807b5043c5eSAndreas Gruenbacher 1808bde89a9eSAndreas Gruenbacher static inline void wake_asender(struct drbd_connection *connection) 1809b411b363SPhilipp Reisner { 1810bde89a9eSAndreas Gruenbacher if (test_bit(SIGNAL_ASENDER, &connection->flags)) 1811bde89a9eSAndreas Gruenbacher force_sig(DRBD_SIG, connection->asender.task); 1812b411b363SPhilipp Reisner } 1813b411b363SPhilipp Reisner 1814bde89a9eSAndreas Gruenbacher static inline void request_ping(struct drbd_connection *connection) 1815b411b363SPhilipp Reisner { 1816bde89a9eSAndreas Gruenbacher set_bit(SEND_PING, 
&connection->flags); 1817bde89a9eSAndreas Gruenbacher wake_asender(connection); 1818b411b363SPhilipp Reisner } 1819b411b363SPhilipp Reisner 1820bde89a9eSAndreas Gruenbacher extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *); 182169a22773SAndreas Gruenbacher extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *); 1822bde89a9eSAndreas Gruenbacher extern int conn_send_command(struct drbd_connection *, struct drbd_socket *, 1823dba58587SAndreas Gruenbacher enum drbd_packet, unsigned int, void *, 1824dba58587SAndreas Gruenbacher unsigned int); 182569a22773SAndreas Gruenbacher extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *, 1826dba58587SAndreas Gruenbacher enum drbd_packet, unsigned int, void *, 1827dba58587SAndreas Gruenbacher unsigned int); 1828b411b363SPhilipp Reisner 1829bde89a9eSAndreas Gruenbacher extern int drbd_send_ping(struct drbd_connection *connection); 1830bde89a9eSAndreas Gruenbacher extern int drbd_send_ping_ack(struct drbd_connection *connection); 183169a22773SAndreas Gruenbacher extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state); 1832bde89a9eSAndreas Gruenbacher extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state); 1833b411b363SPhilipp Reisner 1834b411b363SPhilipp Reisner static inline void drbd_thread_stop(struct drbd_thread *thi) 1835b411b363SPhilipp Reisner { 183681e84650SAndreas Gruenbacher _drbd_thread_stop(thi, false, true); 1837b411b363SPhilipp Reisner } 1838b411b363SPhilipp Reisner 1839b411b363SPhilipp Reisner static inline void drbd_thread_stop_nowait(struct drbd_thread *thi) 1840b411b363SPhilipp Reisner { 184181e84650SAndreas Gruenbacher _drbd_thread_stop(thi, false, false); 1842b411b363SPhilipp Reisner } 1843b411b363SPhilipp Reisner 1844b411b363SPhilipp Reisner static inline void drbd_thread_restart_nowait(struct drbd_thread *thi) 1845b411b363SPhilipp Reisner { 184681e84650SAndreas Gruenbacher _drbd_thread_stop(thi, true, false); 1847b411b363SPhilipp Reisner } 1848b411b363SPhilipp Reisner 1849b411b363SPhilipp Reisner /* counts how many answer packets we expect from our peer, 1850b411b363SPhilipp Reisner * for either explicit application requests, 1851b411b363SPhilipp Reisner * or implicit barrier packets as necessary. 1852b411b363SPhilipp Reisner * increased: 1853b411b363SPhilipp Reisner * w_send_barrier 18548554df1cSAndreas Gruenbacher * _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ); 1855b411b363SPhilipp Reisner * it is much easier and equally valid to count what we queue for the 1856b411b363SPhilipp Reisner * worker, even before it actually was queued or sent. 1857b411b363SPhilipp Reisner * (drbd_make_request_common; recovery path on read io-error) 1858b411b363SPhilipp Reisner * decreased: 1859b411b363SPhilipp Reisner * got_BarrierAck (respective tl_clear, tl_clear_barrier) 18608554df1cSAndreas Gruenbacher * _req_mod(req, DATA_RECEIVED) 1861b411b363SPhilipp Reisner * [from receive_DataReply] 18628554df1cSAndreas Gruenbacher * _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED) 1863b411b363SPhilipp Reisner * [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)] 1864b411b363SPhilipp Reisner * for some reason it is NOT decreased in got_NegAck, 1865b411b363SPhilipp Reisner * but in the resulting cleanup code from report_params. 1866b411b363SPhilipp Reisner * We should try to remember the reason for that...
/* counts how many answer packets we expect from our peer,
 * for either explicit application requests,
 * or implicit barrier packets as necessary.
 * increased:
 *  w_send_barrier
 *  _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
 *    it is much easier and equally valid to count what we queue for the
 *    worker, even before it actually was queued or sent.
 *    (drbd_make_request_common; recovery path on read io-error)
 * decreased:
 *  got_BarrierAck (respectively tl_clear, tl_clear_barrier)
 *  _req_mod(req, DATA_RECEIVED)
 *     [from receive_DataReply]
 *  _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
 *     [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
 *  for some reason it is NOT decreased in got_NegAck,
 *  but in the resulting cleanup code from report_params.
 *  we should try to remember the reason for that...
 *  _req_mod(req, SEND_FAILED or SEND_CANCELED)
 *  _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
 *     [from tl_clear_barrier]
 */
static inline void inc_ap_pending(struct drbd_device *device)
{
        atomic_inc(&device->ap_pending_cnt);
}

#define ERR_IF_CNT_IS_NEGATIVE(which, func, line)                       \
        if (atomic_read(&device->which) < 0)                            \
                drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n",   \
                        func, line,                                     \
                        atomic_read(&device->which))

#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
{
        if (atomic_dec_and_test(&device->ap_pending_cnt))
                wake_up(&device->misc_wait);
        ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
}

/* counts how many resync-related answers we still expect from the peer
 *                   increase                   decrease
 * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
 * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
 *                                         (or P_NEG_ACK with ID_SYNCER)
 */
static inline void inc_rs_pending(struct drbd_device *device)
{
        atomic_inc(&device->rs_pending_cnt);
}

#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
{
        atomic_dec(&device->rs_pending_cnt);
        ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
}
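/*
 * Illustrative sketch (hypothetical, not a real DRBD path): every network
 * request that expects an answer brackets its lifetime with the counters
 * above.  The wake_up() on misc_wait in _dec_ap_pending() is what lets
 * waiters (e.g. disconnect or barrier code) ride the counter down to zero.
 */
static inline void example_account_net_write(struct drbd_device *device)
{
        inc_ap_pending(device); /* queued for the peer, answer expected */
        /* ... send P_DATA, later receive P_WRITE_ACK ... */
        dec_ap_pending(device); /* answer arrived (or the request failed) */
}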
/* counts how many answers we still need to send to the peer.
 * increased on
 *  receive_Data        unless protocol A;
 *                      we need to send a P_RECV_ACK (proto B)
 *                      or P_WRITE_ACK (proto C)
 *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
 *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
 *  receive_Barrier_*   we need to send a P_BARRIER_ACK
 */
static inline void inc_unacked(struct drbd_device *device)
{
        atomic_inc(&device->unacked_cnt);
}

#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
{
        atomic_dec(&device->unacked_cnt);
        ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
{
        atomic_sub(n, &device->unacked_cnt);
        ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

static inline bool is_sync_state(enum drbd_conns connection_state)
{
        return
           (connection_state == C_SYNC_SOURCE
        ||  connection_state == C_SYNC_TARGET
        ||  connection_state == C_PAUSED_SYNC_S
        ||  connection_state == C_PAUSED_SYNC_T);
}

/**
 * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
 * @M: DRBD device.
 *
 * You have to call put_ldev() when finished working with device->ldev.
 */
#define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M, D_INCONSISTENT))
#define get_ldev_if_state(M, MINS) __cond_lock(local, _get_ldev_if_state(M, MINS))
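/*
 * Illustrative sketch (hypothetical helper, not part of DRBD): typical use
 * of is_sync_state() above, asking whether a resync, running or paused, is
 * in progress for this device.
 */
static inline bool example_resync_in_progress(struct drbd_device *device)
{
        return is_sync_state((enum drbd_conns)device->state.conn);
}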
static inline void put_ldev(struct drbd_device *device)
{
        enum drbd_disk_state ds = device->state.disk;
        /* We must check the state *before* the atomic_dec becomes visible,
         * or we have a theoretical race where someone hitting zero,
         * while the state is still D_FAILED, would then see D_DISKLESS in
         * the condition below and call into destroy, where it must not, yet. */
        int i = atomic_dec_return(&device->local_cnt);

        /* This may be called from some endio handler,
         * so we must not sleep here. */

        __release(local);
        D_ASSERT(device, i >= 0);
        if (i == 0) {
                if (ds == D_DISKLESS)
                        /* even internal references gone, safe to destroy */
                        drbd_device_post_work(device, DESTROY_DISK);
                if (ds == D_FAILED)
                        /* all application IO references gone. */
                        if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
                                drbd_device_post_work(device, GO_DISKLESS);
                wake_up(&device->misc_wait);
        }
}

#ifndef __CHECKER__
static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
        int io_allowed;

        /* never get a reference while D_DISKLESS */
        if (device->state.disk == D_DISKLESS)
                return 0;

        atomic_inc(&device->local_cnt);
        io_allowed = (device->state.disk >= mins);
        if (!io_allowed)
                put_ldev(device);
        return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
#endif
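/*
 * Illustrative sketch (hypothetical, not a real DRBD path): the canonical
 * get_ldev()/put_ldev() bracket.  get_ldev() only succeeds while the disk
 * state is at least D_INCONSISTENT, and every successful get must be paired
 * with exactly one put_ldev(), on early-exit paths too.
 */
static inline bool example_touch_local_disk(struct drbd_device *device)
{
        if (!get_ldev(device))
                return false;   /* no usable local disk; device->ldev is off limits */
        /* ... device->ldev may be used here; do not sleep on endio paths ... */
        put_ldev(device);
        return true;
}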
/* this throttles on-the-fly application requests
 * according to max_buffers settings;
 * maybe re-implement using semaphores? */
static inline int drbd_get_max_buffers(struct drbd_device *device)
{
        struct net_conf *nc;
        int mxb;

        rcu_read_lock();
        nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
        mxb = nc ? nc->max_buffers : 1000000;   /* arbitrary limit on open requests */
        rcu_read_unlock();

        return mxb;
}
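/*
 * Illustrative sketch (hypothetical helper, not part of DRBD): the same
 * RCU pattern as drbd_get_max_buffers() above for another tunable.  The
 * "ko_count" member is assumed here from DRBD's ko-count option; the point
 * is the rcu_read_lock()/rcu_dereference() bracket around net_conf, which
 * may be replaced concurrently by the reconfiguration code.
 */
static inline int example_get_ko_count(struct drbd_device *device)
{
        struct net_conf *nc;
        int ko = 0;

        rcu_read_lock();
        nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
        if (nc)
                ko = nc->ko_count;
        rcu_read_unlock();

        return ko;
}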
static inline int drbd_state_is_stable(struct drbd_device *device)
{
        union drbd_dev_state s = device->state;

        /* DO NOT add a default clause, we want the compiler to warn us
         * for any newly introduced state we may have forgotten to add here */

        switch ((enum drbd_conns)s.conn) {
        /* new io only accepted when there is no connection, ... */
        case C_STANDALONE:
        case C_WF_CONNECTION:
        /* ... or there is a well established connection. */
        case C_CONNECTED:
        case C_SYNC_SOURCE:
        case C_SYNC_TARGET:
        case C_VERIFY_S:
        case C_VERIFY_T:
        case C_PAUSED_SYNC_S:
        case C_PAUSED_SYNC_T:
        case C_AHEAD:
        case C_BEHIND:
        /* transitional states, IO allowed */
        case C_DISCONNECTING:
        case C_UNCONNECTED:
        case C_TIMEOUT:
        case C_BROKEN_PIPE:
        case C_NETWORK_FAILURE:
        case C_PROTOCOL_ERROR:
        case C_TEAR_DOWN:
        case C_WF_REPORT_PARAMS:
        case C_STARTING_SYNC_S:
        case C_STARTING_SYNC_T:
                break;

        /* Allow IO in BM exchange states with new protocols */
        case C_WF_BITMAP_S:
                if (first_peer_device(device)->connection->agreed_pro_version < 96)
                        return 0;
                break;

        /* no new io accepted in these states */
        case C_WF_BITMAP_T:
        case C_WF_SYNC_UUID:
        case C_MASK:
                /* not "stable" */
                return 0;
        }

        switch ((enum drbd_disk_state)s.disk) {
        case D_DISKLESS:
        case D_INCONSISTENT:
        case D_OUTDATED:
        case D_CONSISTENT:
        case D_UP_TO_DATE:
        case D_FAILED:
                /* disk state is stable as well. */
                break;

        /* no new io accepted during transitional states */
        case D_ATTACHING:
        case D_NEGOTIATING:
        case D_UNKNOWN:
        case D_MASK:
                /* not "stable" */
                return 0;
        }

        return 1;
}

static inline int drbd_suspended(struct drbd_device *device)
{
        struct drbd_resource *resource = device->resource;

        return resource->susp || resource->susp_fen || resource->susp_nod;
}
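/*
 * Illustrative sketch (hypothetical, not part of DRBD): a reduced form of
 * the gate that may_inc_ap_bio() below builds from these two predicates,
 * before it additionally checks SUSPEND_IO, the ap_bio_cnt limit and
 * pending bitmap IO.
 */
static inline bool example_io_currently_allowed(struct drbd_device *device)
{
        return !drbd_suspended(device) && drbd_state_is_stable(device);
}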
static inline bool may_inc_ap_bio(struct drbd_device *device)
{
        int mxb = drbd_get_max_buffers(device);

        if (drbd_suspended(device))
                return false;
        if (test_bit(SUSPEND_IO, &device->flags))
                return false;

        /* to avoid potential deadlock or bitmap corruption,
         * in various places, we only allow new application io
         * to start during "stable" states. */

        /* no new io accepted when attaching or detaching the disk */
        if (!drbd_state_is_stable(device))
                return false;

        /* since some older kernels don't have atomic_add_unless,
         * and we are within the spinlock anyways, we have this workaround. */
        if (atomic_read(&device->ap_bio_cnt) > mxb)
                return false;
        if (test_bit(BITMAP_IO, &device->flags))
                return false;
        return true;
}

static inline bool inc_ap_bio_cond(struct drbd_device *device)
{
        bool rv = false;

        spin_lock_irq(&device->resource->req_lock);
        rv = may_inc_ap_bio(device);
        if (rv)
                atomic_inc(&device->ap_bio_cnt);
        spin_unlock_irq(&device->resource->req_lock);

        return rv;
}

static inline void inc_ap_bio(struct drbd_device *device)
{
        /* we wait here
         *    as long as the device is suspended,
         *    as long as the bitmap is still on the fly during the
         *    connection handshake, and
         *    as long as we would exceed the max_buffers limit.
         *
         * to avoid races with the reconnect code,
         * we need to atomic_inc within the spinlock. */

        wait_event(device->misc_wait, inc_ap_bio_cond(device));
}
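/*
 * Illustrative sketch (hypothetical, not a real DRBD path): the submit
 * side takes an ap_bio slot before handing the bio on; the matching
 * dec_ap_bio() (defined below) runs from the completion path.
 */
static inline void example_submit_ap_bio(struct drbd_device *device)
{
        inc_ap_bio(device);     /* may sleep until inc_ap_bio_cond() holds */
        /* ... queue the bio; its completion calls dec_ap_bio(device) ... */
}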
static inline void dec_ap_bio(struct drbd_device *device)
{
        int mxb = drbd_get_max_buffers(device);
        int ap_bio = atomic_dec_return(&device->ap_bio_cnt);

        D_ASSERT(device, ap_bio >= 0);

        if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
                if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
                        drbd_queue_work(&first_peer_device(device)->
                                connection->sender_work,
                                &device->bm_io_work.w);
        }

        /* this currently does wake_up for every dec_ap_bio!
         * maybe rather introduce some type of hysteresis?
         * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
        if (ap_bio < mxb)
                wake_up(&device->misc_wait);
}

static inline bool verify_can_do_stop_sector(struct drbd_device *device)
{
        return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
                first_peer_device(device)->connection->agreed_pro_version != 100;
}

static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
{
        int changed = device->ed_uuid != val;
        device->ed_uuid = val;
        return changed;
}

static inline int drbd_queue_order_type(struct drbd_device *device)
{
        /* sorry, we currently have no working implementation
         * of distributed TCQ stuff */
#ifndef QUEUE_ORDERED_NONE
#define QUEUE_ORDERED_NONE 0
#endif
        return QUEUE_ORDERED_NONE;
}

static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
{
        return list_first_entry_or_null(&resource->connections,
                                struct drbd_connection, connections);
}

#endif