Lines Matching +full:we +full:- +full:on +full:- +full:ns
1 // SPDX-License-Identifier: GPL-2.0-only
7 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
8 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
9 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
26 union drbd_state ns; member
52 idr_for_each_entry(&resource->devices, device, vnr) in count_objects()
70 state_change->n_devices = n_devices; in alloc_state_change()
71 state_change->n_connections = n_connections; in alloc_state_change()
72 state_change->devices = (void *)(state_change + 1); in alloc_state_change()
73 state_change->connections = (void *)&state_change->devices[n_devices]; in alloc_state_change()
74 state_change->peer_devices = (void *)&state_change->connections[n_connections]; in alloc_state_change()
75 state_change->resource->resource = NULL; in alloc_state_change()
77 state_change->devices[n].device = NULL; in alloc_state_change()
79 state_change->connections[n].connection = NULL; in alloc_state_change()
103 kref_get(&resource->kref); in remember_old_state()
104 state_change->resource->resource = resource; in remember_old_state()
105 state_change->resource->role[OLD] = in remember_old_state()
107 state_change->resource->susp[OLD] = resource->susp; in remember_old_state()
108 state_change->resource->susp_nod[OLD] = resource->susp_nod; in remember_old_state()
109 state_change->resource->susp_fen[OLD] = resource->susp_fen; in remember_old_state()
111 connection_state_change = state_change->connections; in remember_old_state()
113 kref_get(&connection->kref); in remember_old_state()
114 connection_state_change->connection = connection; in remember_old_state()
115 connection_state_change->cstate[OLD] = in remember_old_state()
116 connection->cstate; in remember_old_state()
117 connection_state_change->peer_role[OLD] = in remember_old_state()
122 device_state_change = state_change->devices; in remember_old_state()
123 peer_device_state_change = state_change->peer_devices; in remember_old_state()
124 idr_for_each_entry(&resource->devices, device, vnr) { in remember_old_state()
125 kref_get(&device->kref); in remember_old_state()
126 device_state_change->device = device; in remember_old_state()
127 device_state_change->disk_state[OLD] = device->state.disk; in remember_old_state()
130 the order of the connections. We may not use for_each_peer_device() here. */ in remember_old_state()
134 peer_device = conn_peer_device(connection, device->vnr); in remember_old_state()
135 peer_device_state_change->peer_device = peer_device; in remember_old_state()
136 peer_device_state_change->disk_state[OLD] = in remember_old_state()
137 device->state.pdsk; in remember_old_state()
138 peer_device_state_change->repl_state[OLD] = in remember_old_state()
140 C_WF_REPORT_PARAMS, device->state.conn); in remember_old_state()
141 peer_device_state_change->resync_susp_user[OLD] = in remember_old_state()
142 device->state.user_isp; in remember_old_state()
143 peer_device_state_change->resync_susp_peer[OLD] = in remember_old_state()
144 device->state.peer_isp; in remember_old_state()
145 peer_device_state_change->resync_susp_dependency[OLD] = in remember_old_state()
146 device->state.aftr_isp; in remember_old_state()
164 resource_state_change = &state_change->resource[0]; in remember_new_state()
165 resource = resource_state_change->resource; in remember_new_state()
167 resource_state_change->role[NEW] = in remember_new_state()
169 resource_state_change->susp[NEW] = resource->susp; in remember_new_state()
170 resource_state_change->susp_nod[NEW] = resource->susp_nod; in remember_new_state()
171 resource_state_change->susp_fen[NEW] = resource->susp_fen; in remember_new_state()
173 for (n = 0; n < state_change->n_devices; n++) { in remember_new_state()
175 &state_change->devices[n]; in remember_new_state()
176 struct drbd_device *device = device_state_change->device; in remember_new_state()
178 device_state_change->disk_state[NEW] = device->state.disk; in remember_new_state()
181 for (n = 0; n < state_change->n_connections; n++) { in remember_new_state()
183 &state_change->connections[n]; in remember_new_state()
185 connection_state_change->connection; in remember_new_state()
187 connection_state_change->cstate[NEW] = connection->cstate; in remember_new_state()
188 connection_state_change->peer_role[NEW] = in remember_new_state()
192 for (n = 0; n < state_change->n_devices * state_change->n_connections; n++) { in remember_new_state()
194 &state_change->peer_devices[n]; in remember_new_state()
196 peer_device_state_change->peer_device->device; in remember_new_state()
197 union drbd_dev_state state = device->state; in remember_new_state()
199 peer_device_state_change->disk_state[NEW] = state.pdsk; in remember_new_state()
200 peer_device_state_change->repl_state[NEW] = in remember_new_state()
202 peer_device_state_change->resync_susp_user[NEW] = in remember_new_state()
204 peer_device_state_change->resync_susp_peer[NEW] = in remember_new_state()
206 peer_device_state_change->resync_susp_dependency[NEW] = in remember_new_state()
213 struct drbd_resource_state_change *resource_state_change = &state_change->resource[0]; in copy_old_to_new_state_change()
219 OLD_TO_NEW(resource_state_change->role); in copy_old_to_new_state_change()
220 OLD_TO_NEW(resource_state_change->susp); in copy_old_to_new_state_change()
221 OLD_TO_NEW(resource_state_change->susp_nod); in copy_old_to_new_state_change()
222 OLD_TO_NEW(resource_state_change->susp_fen); in copy_old_to_new_state_change()
224 for (n_connection = 0; n_connection < state_change->n_connections; n_connection++) { in copy_old_to_new_state_change()
226 &state_change->connections[n_connection]; in copy_old_to_new_state_change()
228 OLD_TO_NEW(connection_state_change->peer_role); in copy_old_to_new_state_change()
229 OLD_TO_NEW(connection_state_change->cstate); in copy_old_to_new_state_change()
232 for (n_device = 0; n_device < state_change->n_devices; n_device++) { in copy_old_to_new_state_change()
234 &state_change->devices[n_device]; in copy_old_to_new_state_change()
236 OLD_TO_NEW(device_state_change->disk_state); in copy_old_to_new_state_change()
239 n_peer_devices = state_change->n_devices * state_change->n_connections; in copy_old_to_new_state_change()
242 &state_change->peer_devices[n_peer_device]; in copy_old_to_new_state_change()
244 OLD_TO_NEW(p->disk_state); in copy_old_to_new_state_change()
245 OLD_TO_NEW(p->repl_state); in copy_old_to_new_state_change()
246 OLD_TO_NEW(p->resync_susp_user); in copy_old_to_new_state_change()
247 OLD_TO_NEW(p->resync_susp_peer); in copy_old_to_new_state_change()
248 OLD_TO_NEW(p->resync_susp_dependency); in copy_old_to_new_state_change()
261 if (state_change->resource->resource) in forget_state_change()
262 kref_put(&state_change->resource->resource->kref, drbd_destroy_resource); in forget_state_change()
263 for (n = 0; n < state_change->n_devices; n++) { in forget_state_change()
264 struct drbd_device *device = state_change->devices[n].device; in forget_state_change()
267 kref_put(&device->kref, drbd_destroy_device); in forget_state_change()
269 for (n = 0; n < state_change->n_connections; n++) { in forget_state_change()
271 state_change->connections[n].connection; in forget_state_change()
274 kref_put(&connection->kref, drbd_destroy_connection); in forget_state_change()
281 union drbd_state ns, enum chg_state_flags flags,
285 static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
287 union drbd_state ns, enum sanitize_state_warnings *warn);
301 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in conn_all_vols_unconf()
302 struct drbd_device *device = peer_device->device; in conn_all_vols_unconf()
303 if (device->state.disk != D_DISKLESS || in conn_all_vols_unconf()
304 device->state.conn != C_STANDALONE || in conn_all_vols_unconf()
305 device->state.role != R_SECONDARY) { in conn_all_vols_unconf()
342 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in conn_highest_role()
343 struct drbd_device *device = peer_device->device; in conn_highest_role()
344 role = max_role(role, device->state.role); in conn_highest_role()
358 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in conn_highest_peer()
359 struct drbd_device *device = peer_device->device; in conn_highest_peer()
360 peer = max_role(peer, device->state.peer); in conn_highest_peer()
374 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in conn_highest_disk()
375 struct drbd_device *device = peer_device->device; in conn_highest_disk()
376 disk_state = max_t(enum drbd_disk_state, disk_state, device->state.disk); in conn_highest_disk()
390 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in conn_lowest_disk()
391 struct drbd_device *device = peer_device->device; in conn_lowest_disk()
392 disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk); in conn_lowest_disk()
406 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in conn_highest_pdsk()
407 struct drbd_device *device = peer_device->device; in conn_highest_pdsk()
408 disk_state = max_t(enum drbd_disk_state, disk_state, device->state.pdsk); in conn_highest_pdsk()
422 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in conn_lowest_conn()
423 struct drbd_device *device = peer_device->device; in conn_lowest_conn()
424 conn = min_t(enum drbd_conns, conn, device->state.conn); in conn_lowest_conn()
438 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) in no_peer_wf_report_params()
439 if (peer_device->device->state.conn == C_WF_REPORT_PARAMS) { in no_peer_wf_report_params()
454 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) in wake_up_all_devices()
455 wake_up(&peer_device->device->state_wait); in wake_up_all_devices()
462 * cl_wide_st_chg() - true if the state change is a cluster wide one
465 * @ns: new (wanted) state.
468 union drbd_state os, union drbd_state ns) in cl_wide_st_chg() argument
470 return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED && in cl_wide_st_chg()
471 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) || in cl_wide_st_chg()
472 (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) || in cl_wide_st_chg()
473 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) || in cl_wide_st_chg()
474 (os.disk != D_FAILED && ns.disk == D_FAILED))) || in cl_wide_st_chg()
475 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) || in cl_wide_st_chg()
476 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S) || in cl_wide_st_chg()
477 (os.conn == C_CONNECTED && ns.conn == C_WF_REPORT_PARAMS); in cl_wide_st_chg()
483 union drbd_state ns; in apply_mask_val() local
484 ns.i = (os.i & ~mask.i) | val.i; in apply_mask_val()
485 return ns; in apply_mask_val()
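
The mask/val pair seen here is the idiom behind every state request in this file: the caller marks the fields it wants to change in "mask", supplies the new values in "val", and apply_mask_val() merges them into the old state in one 32-bit operation. Below is a minimal user-space sketch of that idiom; the toy_state union with its made-up field widths is a stand-in, not DRBD's real union drbd_state.

/* Standalone sketch; toy_state is a simplified stand-in for union drbd_state. */
#include <stdio.h>

union toy_state {
	struct {
		unsigned role:2;
		unsigned conn:5;
		unsigned disk:4;
	};
	unsigned int i;
};

static union toy_state apply_mask_val(union toy_state os,
				      union toy_state mask, union toy_state val)
{
	union toy_state ns;

	ns.i = (os.i & ~mask.i) | val.i;	/* keep unmasked bits, overwrite masked ones */
	return ns;
}

int main(void)
{
	union toy_state os = { .i = 0 }, mask = { .i = 0 }, val = { .i = 0 }, ns;

	os.role = 1; os.conn = 10; os.disk = 8;
	mask.conn = 0x1f;	/* "touch only the conn field": all 5 bits of the field set */
	val.conn = 2;

	ns = apply_mask_val(os, mask, val);
	printf("role=%u conn=%u disk=%u\n", ns.role, ns.conn, ns.disk);	/* role=1 conn=2 disk=8 */
	return 0;
}
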
493 union drbd_state ns; in drbd_change_state() local
496 spin_lock_irqsave(&device->resource->req_lock, flags); in drbd_change_state()
497 ns = apply_mask_val(drbd_read_state(device), mask, val); in drbd_change_state()
498 rv = _drbd_set_state(device, ns, f, NULL); in drbd_change_state()
499 spin_unlock_irqrestore(&device->resource->req_lock, flags); in drbd_change_state()
505 * drbd_force_state() - Impose on our state a change that happens outside our control
520 union drbd_state os, ns; in _req_st_cond() local
524 if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &device->flags)) in _req_st_cond()
527 if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags)) in _req_st_cond()
530 spin_lock_irqsave(&device->resource->req_lock, flags); in _req_st_cond()
532 ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL); in _req_st_cond()
533 rv = is_valid_transition(os, ns); in _req_st_cond()
537 if (!cl_wide_st_chg(device, os, ns)) in _req_st_cond()
540 rv = is_valid_state(device, ns); in _req_st_cond()
542 rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection); in _req_st_cond()
547 spin_unlock_irqrestore(&device->resource->req_lock, flags); in _req_st_cond()
553 * drbd_req_state() - Perform a possibly cluster-wide state change
568 union drbd_state os, ns; in drbd_req_state() local
575 mutex_lock(device->state_mutex); in drbd_req_state()
579 spin_lock_irqsave(&device->resource->req_lock, flags); in drbd_req_state()
581 ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL); in drbd_req_state()
582 rv = is_valid_transition(os, ns); in drbd_req_state()
584 spin_unlock_irqrestore(&device->resource->req_lock, flags); in drbd_req_state()
588 if (cl_wide_st_chg(device, os, ns)) { in drbd_req_state()
589 rv = is_valid_state(device, ns); in drbd_req_state()
591 rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection); in drbd_req_state()
592 spin_unlock_irqrestore(&device->resource->req_lock, flags); in drbd_req_state()
596 print_st_err(device, os, ns, rv); in drbd_req_state()
603 print_st_err(device, os, ns, rv); in drbd_req_state()
607 wait_event(device->state_wait, in drbd_req_state()
612 print_st_err(device, os, ns, rv); in drbd_req_state()
615 spin_lock_irqsave(&device->resource->req_lock, flags); in drbd_req_state()
616 ns = apply_mask_val(drbd_read_state(device), mask, val); in drbd_req_state()
617 rv = _drbd_set_state(device, ns, f, &done); in drbd_req_state()
619 rv = _drbd_set_state(device, ns, f, &done); in drbd_req_state()
622 spin_unlock_irqrestore(&device->resource->req_lock, flags); in drbd_req_state()
625 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task); in drbd_req_state()
633 mutex_unlock(device->state_mutex); in drbd_req_state()
639 * _drbd_request_state() - Request a state change (with flags)
654 wait_event(device->state_wait, in _drbd_request_state()
661 * We grab drbd_md_get_buffer(), because we don't want to "fail" the disk while
662 * there is IO in-flight: the transition into D_FAILED for detach purposes
665 * We wrap it all into wait_event(), to retry in case the drbd_req_state()
669 * drbd_md_get_buffer() while trying to get out of the "transient state", we
675 return drbd_req_state(device, NS(disk, D_FAILED), in request_detach()
683 drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */ in drbd_request_detach_interruptible()
684 wait_event_interruptible(device->state_wait, in drbd_request_detach_interruptible()
688 ret = wait_event_interruptible(device->misc_wait, in drbd_request_detach_interruptible()
689 device->state.disk != D_FAILED); in drbd_request_detach_interruptible()
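
The comment block above request_detach() (file lines 661-669) explains why the detach request is wrapped in wait_event() and simply retried while the answer is SS_IN_TRANSIENT_STATE. A user-space model of just that retry pattern follows; the names try_detach and toy_rv, the enum values, and the fake failure schedule are invented for illustration only.

/* Toy model of "retry while in transient state"; not the DRBD API, values are arbitrary. */
#include <stdio.h>

enum toy_rv { SS_SUCCESS = 1, SS_IN_TRANSIENT_STATE = -1 };

/* Pretend the first two attempts race with an ongoing transition. */
static enum toy_rv try_detach(void)
{
	static int attempts;

	return ++attempts < 3 ? SS_IN_TRANSIENT_STATE : SS_SUCCESS;
}

int main(void)
{
	enum toy_rv rv;

	do {
		rv = try_detach();	/* DRBD issues the real request inside wait_event() */
	} while (rv == SS_IN_TRANSIENT_STATE);

	printf("detach result: %d\n", rv);	/* prints 1 (SS_SUCCESS) after two retries */
	return 0;
}
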
707 wait_event_cmd(device->state_wait, in _drbd_request_state_holding_state_mutex()
709 mutex_unlock(device->state_mutex), in _drbd_request_state_holding_state_mutex()
710 mutex_lock(device->state_mutex)); in _drbd_request_state_holding_state_mutex()
715 static void print_st(struct drbd_device *device, const char *name, union drbd_state ns) in print_st() argument
719 drbd_conn_str(ns.conn), in print_st()
720 drbd_role_str(ns.role), in print_st()
721 drbd_role_str(ns.peer), in print_st()
722 drbd_disk_str(ns.disk), in print_st()
723 drbd_disk_str(ns.pdsk), in print_st()
724 is_susp(ns) ? 's' : 'r', in print_st()
725 ns.aftr_isp ? 'a' : '-', in print_st()
726 ns.peer_isp ? 'p' : '-', in print_st()
727 ns.user_isp ? 'u' : '-', in print_st()
728 ns.susp_fen ? 'F' : '-', in print_st()
729 ns.susp_nod ? 'N' : '-' in print_st()
734 union drbd_state ns, enum drbd_state_rv err) in print_st_err() argument
740 print_st(device, "wanted", ns); in print_st_err()
743 static long print_state_change(char *pb, union drbd_state os, union drbd_state ns, in print_state_change() argument
750 if (ns.role != os.role && flags & CS_DC_ROLE) in print_state_change()
751 pbp += sprintf(pbp, "role( %s -> %s ) ", in print_state_change()
753 drbd_role_str(ns.role)); in print_state_change()
754 if (ns.peer != os.peer && flags & CS_DC_PEER) in print_state_change()
755 pbp += sprintf(pbp, "peer( %s -> %s ) ", in print_state_change()
757 drbd_role_str(ns.peer)); in print_state_change()
758 if (ns.conn != os.conn && flags & CS_DC_CONN) in print_state_change()
759 pbp += sprintf(pbp, "conn( %s -> %s ) ", in print_state_change()
761 drbd_conn_str(ns.conn)); in print_state_change()
762 if (ns.disk != os.disk && flags & CS_DC_DISK) in print_state_change()
763 pbp += sprintf(pbp, "disk( %s -> %s ) ", in print_state_change()
765 drbd_disk_str(ns.disk)); in print_state_change()
766 if (ns.pdsk != os.pdsk && flags & CS_DC_PDSK) in print_state_change()
767 pbp += sprintf(pbp, "pdsk( %s -> %s ) ", in print_state_change()
769 drbd_disk_str(ns.pdsk)); in print_state_change()
771 return pbp - pb; in print_state_change()
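
print_state_change() and its two callers below build their log line with a cursor that is advanced by sprintf()'s return value, so the total length is simply pbp - pb. A tiny self-contained illustration of that cursor pattern; the buffer size and example strings here are arbitrary.

#include <stdio.h>

int main(void)
{
	char pb[128], *pbp = pb;

	/* each append advances the cursor by the number of characters written */
	pbp += sprintf(pbp, "role( %s -> %s ) ", "Secondary", "Primary");
	pbp += sprintf(pbp, "disk( %s -> %s ) ", "Inconsistent", "UpToDate");

	printf("%s(len=%ld)\n", pb, (long)(pbp - pb));
	return 0;
}
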
774 static void drbd_pr_state_change(struct drbd_device *device, union drbd_state os, union drbd_state ns, in drbd_pr_state_change() argument
780 pbp += print_state_change(pbp, os, ns, flags ^ CS_DC_MASK); in drbd_pr_state_change()
782 if (ns.aftr_isp != os.aftr_isp) in drbd_pr_state_change()
783 pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ", in drbd_pr_state_change()
785 ns.aftr_isp); in drbd_pr_state_change()
786 if (ns.peer_isp != os.peer_isp) in drbd_pr_state_change()
787 pbp += sprintf(pbp, "peer_isp( %d -> %d ) ", in drbd_pr_state_change()
789 ns.peer_isp); in drbd_pr_state_change()
790 if (ns.user_isp != os.user_isp) in drbd_pr_state_change()
791 pbp += sprintf(pbp, "user_isp( %d -> %d ) ", in drbd_pr_state_change()
793 ns.user_isp); in drbd_pr_state_change()
799 static void conn_pr_state_change(struct drbd_connection *connection, union drbd_state os, union drbd_state ns, in conn_pr_state_change() argument
805 pbp += print_state_change(pbp, os, ns, flags); in conn_pr_state_change()
807 if (is_susp(ns) != is_susp(os) && flags & CS_DC_SUSP) in conn_pr_state_change()
808 pbp += sprintf(pbp, "susp( %d -> %d ) ", in conn_pr_state_change()
810 is_susp(ns)); in conn_pr_state_change()
818 * is_valid_state() - Returns an SS_ error code if ns is not valid
820 * @ns: State to consider.
823 is_valid_state(struct drbd_device *device, union drbd_state ns) in is_valid_state() argument
834 fp = rcu_dereference(device->ldev->disk_conf)->fencing; in is_valid_state()
838 nc = rcu_dereference(first_peer_device(device)->connection->net_conf); in is_valid_state()
840 if (!nc->two_primaries && ns.role == R_PRIMARY) { in is_valid_state()
841 if (ns.peer == R_PRIMARY) in is_valid_state()
843 else if (conn_highest_peer(first_peer_device(device)->connection) == R_PRIMARY) in is_valid_state()
850 else if (ns.role == R_SECONDARY && device->open_cnt) in is_valid_state()
853 else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE) in is_valid_state()
857 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN) in is_valid_state()
860 else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT) in is_valid_state()
863 else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT) in is_valid_state()
866 else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT) in is_valid_state()
869 else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) in is_valid_state()
872 else if ((ns.conn == C_CONNECTED || in is_valid_state()
873 ns.conn == C_WF_BITMAP_S || in is_valid_state()
874 ns.conn == C_SYNC_SOURCE || in is_valid_state()
875 ns.conn == C_PAUSED_SYNC_S) && in is_valid_state()
876 ns.disk == D_OUTDATED) in is_valid_state()
879 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && in is_valid_state()
880 (nc->verify_alg[0] == 0)) in is_valid_state()
883 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && in is_valid_state()
884 first_peer_device(device)->connection->agreed_pro_version < 88) in is_valid_state()
887 else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) in is_valid_state()
890 else if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) && in is_valid_state()
891 ns.pdsk == D_UNKNOWN) in is_valid_state()
894 else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN) in is_valid_state()
904 * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
908 * @ns: new state.
912 is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_connection *connection) in is_valid_soft_transition() argument
916 if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) && in is_valid_soft_transition()
920 if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE) in is_valid_soft_transition()
923 if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS) in is_valid_soft_transition()
926 if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED) in is_valid_soft_transition()
929 if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING) in is_valid_soft_transition()
932 if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED) in is_valid_soft_transition()
937 if (test_bit(STATE_SENT, &connection->flags) && in is_valid_soft_transition()
938 !((ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION) || in is_valid_soft_transition()
939 (ns.conn >= C_CONNECTED && os.conn == C_WF_REPORT_PARAMS))) in is_valid_soft_transition()
945 if (os.role != R_PRIMARY && ns.role == R_PRIMARY in is_valid_soft_transition()
946 && ns.pdsk == D_UP_TO_DATE in is_valid_soft_transition()
947 && ns.disk != D_UP_TO_DATE && ns.disk != D_DISKLESS in is_valid_soft_transition()
948 && (ns.conn <= C_WF_SYNC_UUID || ns.conn != os.conn)) in is_valid_soft_transition()
951 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED) in is_valid_soft_transition()
954 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && in is_valid_soft_transition()
955 ns.conn != os.conn && os.conn > C_CONNECTED) in is_valid_soft_transition()
958 if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) && in is_valid_soft_transition()
962 if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE) in is_valid_soft_transition()
964 rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */ in is_valid_soft_transition()
966 if (ns.conn == C_DISCONNECTING && ns.pdsk == D_OUTDATED && in is_valid_soft_transition()
976 /* no change -> nothing to do, at least for the connection part */ in is_valid_conn_transition()
984 /* from C_STANDALONE, we start with C_UNCONNECTED */ in is_valid_conn_transition()
988 /* When establishing a connection we need to go through WF_REPORT_PARAMS! in is_valid_conn_transition()
989 Necessary to do the right thing upon invalidate-remote on a disconnected resource */ in is_valid_conn_transition()
1006 * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
1008 * imposed on DRBD by the environment. E.g. disk broke or network broke down.
1010 * @ns: new state.
1014 is_valid_transition(union drbd_state os, union drbd_state ns) in is_valid_transition() argument
1018 rv = is_valid_conn_transition(os.conn, ns.conn); in is_valid_transition()
1020 /* we cannot fail (again) if we already detached */ in is_valid_transition()
1021 if (ns.disk == D_FAILED && os.disk == D_DISKLESS) in is_valid_transition()
1031 [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.", in print_sanitize_warnings()
1043 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
1046 * @ns: new state.
1049 * When we lose the connection, we have to set the state of the peer's disk (pdsk)
1053 union drbd_state ns, enum sanitize_state_warnings *warn) in sanitize_state() argument
1064 fp = rcu_dereference(device->ldev->disk_conf)->fencing; in sanitize_state()
1070 if (ns.conn < C_CONNECTED) { in sanitize_state()
1071 ns.peer_isp = 0; in sanitize_state()
1072 ns.peer = R_UNKNOWN; in sanitize_state()
1073 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT) in sanitize_state()
1074 ns.pdsk = D_UNKNOWN; in sanitize_state()
1078 if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY) in sanitize_state()
1079 ns.aftr_isp = 0; in sanitize_state()
1083 if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) { in sanitize_state()
1085 *warn = ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ? in sanitize_state()
1087 ns.conn = C_CONNECTED; in sanitize_state()
1090 /* Connection breaks down before we finished "Negotiating" */ in sanitize_state()
1091 if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING && in sanitize_state()
1093 if (device->ed_uuid == device->ldev->md.uuid[UI_CURRENT]) { in sanitize_state()
1094 ns.disk = device->new_state_tmp.disk; in sanitize_state()
1095 ns.pdsk = device->new_state_tmp.pdsk; in sanitize_state()
1099 ns.disk = D_DISKLESS; in sanitize_state()
1100 ns.pdsk = D_UNKNOWN; in sanitize_state()
1105 /* D_CONSISTENT and D_OUTDATED vanish when we get connected */ in sanitize_state()
1106 if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) { in sanitize_state()
1107 if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) in sanitize_state()
1108 ns.disk = D_UP_TO_DATE; in sanitize_state()
1109 if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED) in sanitize_state()
1110 ns.pdsk = D_UP_TO_DATE; in sanitize_state()
1113 /* Implications of the connection state on the disk states */ in sanitize_state()
1118 switch ((enum drbd_conns)ns.conn) { in sanitize_state()
1176 if (ns.disk > disk_max) in sanitize_state()
1177 ns.disk = disk_max; in sanitize_state()
1179 if (ns.disk < disk_min) { in sanitize_state()
1182 ns.disk = disk_min; in sanitize_state()
1184 if (ns.pdsk > pdsk_max) in sanitize_state()
1185 ns.pdsk = pdsk_max; in sanitize_state()
1187 if (ns.pdsk < pdsk_min) { in sanitize_state()
1190 ns.pdsk = pdsk_min; in sanitize_state()
1194 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) && in sanitize_state()
1196 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */ in sanitize_state()
1198 if (device->resource->res_opts.on_no_data == OND_SUSPEND_IO && in sanitize_state()
1199 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) && in sanitize_state()
1201 ns.susp_nod = 1; /* Suspend IO while no accessible data is available */ in sanitize_state()
1203 if (ns.aftr_isp || ns.peer_isp || ns.user_isp) { in sanitize_state()
1204 if (ns.conn == C_SYNC_SOURCE) in sanitize_state()
1205 ns.conn = C_PAUSED_SYNC_S; in sanitize_state()
1206 if (ns.conn == C_SYNC_TARGET) in sanitize_state()
1207 ns.conn = C_PAUSED_SYNC_T; in sanitize_state()
1209 if (ns.conn == C_PAUSED_SYNC_S) in sanitize_state()
1210 ns.conn = C_SYNC_SOURCE; in sanitize_state()
1211 if (ns.conn == C_PAUSED_SYNC_T) in sanitize_state()
1212 ns.conn = C_SYNC_TARGET; in sanitize_state()
1215 return ns; in sanitize_state()
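
The tail of sanitize_state() (file lines 1203-1212 above) couples the three resync-suspend bits to the connection state: if any of aftr_isp, peer_isp or user_isp is set, an active SyncSource/SyncTarget is turned into its paused counterpart, and with all of them clear a paused state is resumed. A toy model of only that mapping, with stand-in enum values:

#include <stdio.h>
#include <stdbool.h>

enum toy_conn { C_SYNC_SOURCE, C_SYNC_TARGET, C_PAUSED_SYNC_S, C_PAUSED_SYNC_T };

static enum toy_conn apply_isp(enum toy_conn conn, bool any_isp)
{
	if (any_isp) {
		if (conn == C_SYNC_SOURCE)
			return C_PAUSED_SYNC_S;
		if (conn == C_SYNC_TARGET)
			return C_PAUSED_SYNC_T;
	} else {
		if (conn == C_PAUSED_SYNC_S)
			return C_SYNC_SOURCE;
		if (conn == C_PAUSED_SYNC_T)
			return C_SYNC_TARGET;
	}
	return conn;
}

int main(void)
{
	printf("%d\n", apply_isp(C_SYNC_SOURCE, true));	/* -> C_PAUSED_SYNC_S */
	printf("%d\n", apply_isp(C_PAUSED_SYNC_T, false));	/* -> C_SYNC_TARGET */
	return 0;
}
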
1220 if (test_and_clear_bit(AL_SUSPENDED, &device->flags)) in drbd_resume_al()
1227 struct drbd_device *device = peer_device->device; in set_ov_position()
1229 if (peer_device->connection->agreed_pro_version < 90) in set_ov_position()
1230 device->ov_start_sector = 0; in set_ov_position()
1231 device->rs_total = drbd_bm_bits(device); in set_ov_position()
1232 device->ov_position = 0; in set_ov_position()
1236 * on C_VERIFY_T, we initialize ov_left and friends in set_ov_position()
1239 device->ov_start_sector = ~(sector_t)0; in set_ov_position()
1241 unsigned long bit = BM_SECT_TO_BIT(device->ov_start_sector); in set_ov_position()
1242 if (bit >= device->rs_total) { in set_ov_position()
1243 device->ov_start_sector = in set_ov_position()
1244 BM_BIT_TO_SECT(device->rs_total - 1); in set_ov_position()
1245 device->rs_total = 1; in set_ov_position()
1247 device->rs_total -= bit; in set_ov_position()
1248 device->ov_position = device->ov_start_sector; in set_ov_position()
1250 device->ov_left = device->rs_total; in set_ov_position()
1254 * _drbd_set_state() - Set a new DRBD state
1256 * @ns: new state.
1263 _drbd_set_state(struct drbd_device *device, union drbd_state ns, in _drbd_set_state() argument
1267 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; in _drbd_set_state()
1276 ns = sanitize_state(device, os, ns, &ssw); in _drbd_set_state()
1277 if (ns.i == os.i) in _drbd_set_state()
1280 rv = is_valid_transition(os, ns); in _drbd_set_state()
1285 /* pre-state-change checks ; only look at ns */ in _drbd_set_state()
1288 rv = is_valid_state(device, ns); in _drbd_set_state()
1294 rv = is_valid_soft_transition(os, ns, connection); in _drbd_set_state()
1296 rv = is_valid_soft_transition(os, ns, connection); in _drbd_set_state()
1301 print_st_err(device, os, ns, rv); in _drbd_set_state()
1307 drbd_pr_state_change(device, os, ns, flags); in _drbd_set_state()
1310 sanitize_state(). Only display it here if we were not called from in _drbd_set_state()
1313 conn_pr_state_change(connection, os, ns, in _drbd_set_state()
1316 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference in _drbd_set_state()
1317 * on the ldev here, to be sure the transition -> D_DISKLESS resp. in _drbd_set_state()
1319 * after_state_ch works run, where we put_ldev again. */ in _drbd_set_state()
1320 if ((os.disk != D_FAILED && ns.disk == D_FAILED) || in _drbd_set_state()
1321 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS)) in _drbd_set_state()
1322 atomic_inc(&device->local_cnt); in _drbd_set_state()
1324 if (!is_sync_state(os.conn) && is_sync_state(ns.conn)) in _drbd_set_state()
1325 clear_bit(RS_DONE, &device->flags); in _drbd_set_state()
1328 state_change = remember_old_state(device->resource, GFP_ATOMIC); in _drbd_set_state()
1332 * depending on that change happens. */ in _drbd_set_state()
1334 device->state.i = ns.i; in _drbd_set_state()
1335 device->resource->susp = ns.susp; in _drbd_set_state()
1336 device->resource->susp_nod = ns.susp_nod; in _drbd_set_state()
1337 device->resource->susp_fen = ns.susp_fen; in _drbd_set_state()
1342 /* put replicated vs not-replicated requests in separate epochs */ in _drbd_set_state()
1344 drbd_should_do_remote((union drbd_dev_state)ns.i)) in _drbd_set_state()
1347 if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) in _drbd_set_state()
1351 if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS && in _drbd_set_state()
1353 clear_bit(STATE_SENT, &connection->flags); in _drbd_set_state()
1357 wake_up(&device->misc_wait); in _drbd_set_state()
1358 wake_up(&device->state_wait); in _drbd_set_state()
1359 wake_up(&connection->ping_wait); in _drbd_set_state()
1361 /* Aborted verify run, or we reached the stop sector. in _drbd_set_state()
1362 * Log the last position, unless end-of-device. */ in _drbd_set_state()
1364 ns.conn <= C_CONNECTED) { in _drbd_set_state()
1365 device->ov_start_sector = in _drbd_set_state()
1366 BM_BIT_TO_SECT(drbd_bm_bits(device) - device->ov_left); in _drbd_set_state()
1367 if (device->ov_left) in _drbd_set_state()
1369 (unsigned long long)device->ov_start_sector); in _drbd_set_state()
1373 (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) { in _drbd_set_state()
1375 device->rs_paused += (long)jiffies in _drbd_set_state()
1376 -(long)device->rs_mark_time[device->rs_last_mark]; in _drbd_set_state()
1377 if (ns.conn == C_SYNC_TARGET) in _drbd_set_state()
1378 mod_timer(&device->resync_timer, jiffies); in _drbd_set_state()
1382 (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) { in _drbd_set_state()
1384 device->rs_mark_time[device->rs_last_mark] = jiffies; in _drbd_set_state()
1388 (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) { in _drbd_set_state()
1392 set_ov_position(peer_device, ns.conn); in _drbd_set_state()
1393 device->rs_start = now; in _drbd_set_state()
1394 device->rs_last_sect_ev = 0; in _drbd_set_state()
1395 device->ov_last_oos_size = 0; in _drbd_set_state()
1396 device->ov_last_oos_start = 0; in _drbd_set_state()
1399 device->rs_mark_left[i] = device->ov_left; in _drbd_set_state()
1400 device->rs_mark_time[i] = now; in _drbd_set_state()
1405 if (ns.conn == C_VERIFY_S) { in _drbd_set_state()
1407 (unsigned long long)device->ov_position); in _drbd_set_state()
1408 mod_timer(&device->resync_timer, jiffies); in _drbd_set_state()
1413 u32 mdf = device->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND| in _drbd_set_state()
1418 if (test_bit(CRASHED_PRIMARY, &device->flags)) in _drbd_set_state()
1420 if (device->state.role == R_PRIMARY || in _drbd_set_state()
1421 (device->state.pdsk < D_INCONSISTENT && device->state.peer == R_PRIMARY)) in _drbd_set_state()
1423 if (device->state.conn > C_WF_REPORT_PARAMS) in _drbd_set_state()
1425 if (device->state.disk > D_INCONSISTENT) in _drbd_set_state()
1427 if (device->state.disk > D_OUTDATED) in _drbd_set_state()
1429 if (device->state.pdsk <= D_OUTDATED && device->state.pdsk >= D_INCONSISTENT) in _drbd_set_state()
1431 if (mdf != device->ldev->md.flags) { in _drbd_set_state()
1432 device->ldev->md.flags = mdf; in _drbd_set_state()
1435 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT) in _drbd_set_state()
1436 drbd_set_ed_uuid(device, device->ldev->md.uuid[UI_CURRENT]); in _drbd_set_state()
1442 os.peer == R_SECONDARY && ns.peer == R_PRIMARY) in _drbd_set_state()
1443 set_bit(CONSIDER_RESYNC, &device->flags); in _drbd_set_state()
1446 if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING) in _drbd_set_state()
1447 drbd_thread_stop_nowait(&connection->receiver); in _drbd_set_state()
1450 if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE) in _drbd_set_state()
1451 drbd_thread_stop_nowait(&connection->receiver); in _drbd_set_state()
1453 /* Upon network failure, we need to restart the receiver. */ in _drbd_set_state()
1455 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT) in _drbd_set_state()
1456 drbd_thread_restart_nowait(&connection->receiver); in _drbd_set_state()
1458 /* Resume AL writing if we get a connection */ in _drbd_set_state()
1459 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) { in _drbd_set_state()
1461 connection->connect_cnt++; in _drbd_set_state()
1465 * kill newly established sessions while we are still trying to thaw in _drbd_set_state()
1468 ns.disk > D_NEGOTIATING) in _drbd_set_state()
1469 device->last_reattach_jif = jiffies; in _drbd_set_state()
1473 ascw->os = os; in _drbd_set_state()
1474 ascw->ns = ns; in _drbd_set_state()
1475 ascw->flags = flags; in _drbd_set_state()
1476 ascw->w.cb = w_after_state_ch; in _drbd_set_state()
1477 ascw->device = device; in _drbd_set_state()
1478 ascw->done = done; in _drbd_set_state()
1479 ascw->state_change = state_change; in _drbd_set_state()
1480 drbd_queue_work(&connection->sender_work, in _drbd_set_state()
1481 &ascw->w); in _drbd_set_state()
1493 struct drbd_device *device = ascw->device; in w_after_state_ch()
1495 after_state_ch(device, ascw->os, ascw->ns, ascw->flags, ascw->state_change); in w_after_state_ch()
1496 forget_state_change(ascw->state_change); in w_after_state_ch()
1497 if (ascw->flags & CS_WAIT_COMPLETE) in w_after_state_ch()
1498 complete(ascw->done); in w_after_state_ch()
1508 _drbd_request_state(device, NS(conn, C_CONNECTED), CS_VERBOSE); in abw_start_sync()
1512 switch (device->state.conn) { in abw_start_sync()
1514 _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); in abw_start_sync()
1529 D_ASSERT(device, current == first_peer_device(device)->connection->worker.task); in drbd_bitmap_io_from_worker()
1531 /* open coded non-blocking drbd_suspend_io(device); */ in drbd_bitmap_io_from_worker()
1532 atomic_inc(&device->suspend_cnt); in drbd_bitmap_io_from_worker()
1548 struct drbd_resource *resource = resource_state_change->resource; in notify_resource_state_change()
1550 .res_role = resource_state_change->role[NEW], in notify_resource_state_change()
1551 .res_susp = resource_state_change->susp[NEW], in notify_resource_state_change()
1552 .res_susp_nod = resource_state_change->susp_nod[NEW], in notify_resource_state_change()
1553 .res_susp_fen = resource_state_change->susp_fen[NEW], in notify_resource_state_change()
1564 struct drbd_connection *connection = connection_state_change->connection; in notify_connection_state_change()
1566 .conn_connection_state = connection_state_change->cstate[NEW], in notify_connection_state_change()
1567 .conn_role = connection_state_change->peer_role[NEW], in notify_connection_state_change()
1578 struct drbd_device *device = device_state_change->device; in notify_device_state_change()
1580 .dev_disk_state = device_state_change->disk_state[NEW], in notify_device_state_change()
1591 struct drbd_peer_device *peer_device = p->peer_device; in notify_peer_device_state_change()
1593 .peer_repl_state = p->repl_state[NEW], in notify_peer_device_state_change()
1594 .peer_disk_state = p->disk_state[NEW], in notify_peer_device_state_change()
1595 .peer_resync_susp_user = p->resync_susp_user[NEW], in notify_peer_device_state_change()
1596 .peer_resync_susp_peer = p->resync_susp_peer[NEW], in notify_peer_device_state_change()
1597 .peer_resync_susp_dependency = p->resync_susp_dependency[NEW], in notify_peer_device_state_change()
1605 struct drbd_resource_state_change *resource_state_change = &state_change->resource[0]; in broadcast_state_change()
1626 HAS_CHANGED(resource_state_change->role) || in broadcast_state_change()
1627 HAS_CHANGED(resource_state_change->susp) || in broadcast_state_change()
1628 HAS_CHANGED(resource_state_change->susp_nod) || in broadcast_state_change()
1629 HAS_CHANGED(resource_state_change->susp_fen); in broadcast_state_change()
1635 for (n_connection = 0; n_connection < state_change->n_connections; n_connection++) { in broadcast_state_change()
1637 &state_change->connections[n_connection]; in broadcast_state_change()
1639 if (HAS_CHANGED(connection_state_change->peer_role) || in broadcast_state_change()
1640 HAS_CHANGED(connection_state_change->cstate)) in broadcast_state_change()
1645 for (n_device = 0; n_device < state_change->n_devices; n_device++) { in broadcast_state_change()
1647 &state_change->devices[n_device]; in broadcast_state_change()
1649 if (HAS_CHANGED(device_state_change->disk_state)) in broadcast_state_change()
1654 n_peer_devices = state_change->n_devices * state_change->n_connections; in broadcast_state_change()
1657 &state_change->peer_devices[n_peer_device]; in broadcast_state_change()
1659 if (HAS_CHANGED(p->disk_state) || in broadcast_state_change()
1660 HAS_CHANGED(p->repl_state) || in broadcast_state_change()
1661 HAS_CHANGED(p->resync_susp_user) || in broadcast_state_change()
1662 HAS_CHANGED(p->resync_susp_peer) || in broadcast_state_change()
1663 HAS_CHANGED(p->resync_susp_dependency)) in broadcast_state_change()
1677 static bool lost_contact_to_peer_data(enum drbd_disk_state os, enum drbd_disk_state ns) in lost_contact_to_peer_data() argument
1680 && (ns < D_INCONSISTENT || ns == D_UNKNOWN || ns == D_OUTDATED)) in lost_contact_to_peer_data()
1690 && (ns == D_DISKLESS || ns == D_FAILED || ns == D_OUTDATED)) in lost_contact_to_peer_data()
1697 * after_state_ch() - Perform after state change actions that may sleep
1700 * @ns: new state.
1705 union drbd_state ns, enum chg_state_flags flags, in after_state_ch() argument
1708 struct drbd_resource *resource = device->resource; in after_state_ch()
1710 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; in after_state_ch()
1717 sib.ns = ns; in after_state_ch()
1720 && (ns.disk == D_UP_TO_DATE && ns.pdsk == D_UP_TO_DATE)) { in after_state_ch()
1721 clear_bit(CRASHED_PRIMARY, &device->flags); in after_state_ch()
1722 if (device->p_uuid) in after_state_ch()
1723 device->p_uuid[UI_FLAGS] &= ~((u64)2); in after_state_ch()
1730 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)) in after_state_ch()
1731 drbd_khelper(device, "pri-on-incon-degr"); in after_state_ch()
1733 /* Here we have the actions that are performed after a in after_state_ch()
1736 if (ns.susp_nod) { in after_state_ch()
1739 spin_lock_irq(&device->resource->req_lock); in after_state_ch()
1747 if (resource->susp_nod && what != NOTHING) { in after_state_ch()
1754 spin_unlock_irq(&device->resource->req_lock); in after_state_ch()
1757 if (ns.susp_fen) { in after_state_ch()
1758 spin_lock_irq(&device->resource->req_lock); in after_state_ch()
1759 if (resource->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) { in after_state_ch()
1765 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) in after_state_ch()
1766 clear_bit(NEW_CUR_UUID, &peer_device->device->flags); in after_state_ch()
1769 /* We should actively create a new uuid, _before_ in after_state_ch()
1770 * we resume/resend, if the peer is diskless in after_state_ch()
1781 spin_unlock_irq(&device->resource->req_lock); in after_state_ch()
1784 /* Became sync source. With protocol >= 96, we still need to send out in after_state_ch()
1789 (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) && in after_state_ch()
1790 connection->agreed_pro_version >= 96 && get_ldev(device)) { in after_state_ch()
1797 ns.pdsk > D_DISKLESS && ns.pdsk != D_UNKNOWN) { /* attach on the peer */ in after_state_ch()
1798 /* we probably will start a resync soon. in after_state_ch()
1800 device->rs_total = 0; in after_state_ch()
1801 device->rs_failed = 0; in after_state_ch()
1802 atomic_set(&device->rs_pending_cnt, 0); in after_state_ch()
1806 drbd_send_state(peer_device, ns); in after_state_ch()
1808 /* No point in queuing send_bitmap if we don't have a connection in after_state_ch()
1811 if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S && in after_state_ch()
1812 device->state.conn == C_WF_BITMAP_S) in after_state_ch()
1818 if (lost_contact_to_peer_data(os.pdsk, ns.pdsk)) { in after_state_ch()
1820 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) && in after_state_ch()
1821 device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) { in after_state_ch()
1823 set_bit(NEW_CUR_UUID, &device->flags); in after_state_ch()
1833 if (ns.pdsk < D_INCONSISTENT && get_ldev(device)) { in after_state_ch()
1834 if (os.peer != R_PRIMARY && ns.peer == R_PRIMARY && in after_state_ch()
1835 device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) { in after_state_ch()
1840 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY) in after_state_ch()
1841 /* We may still be Primary ourselves. in after_state_ch()
1849 /* Write out all changed bits on demote. in after_state_ch()
1851 * if there is a resync going on still */ in after_state_ch()
1852 if (os.role == R_PRIMARY && ns.role == R_SECONDARY && in after_state_ch()
1853 device->state.conn <= C_CONNECTED && get_ldev(device)) { in after_state_ch()
1862 if (ns.conn >= C_CONNECTED && in after_state_ch()
1863 os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) { in after_state_ch()
1866 drbd_send_state(peer_device, ns); in after_state_ch()
1869 /* We want to pause/continue resync, tell peer. */ in after_state_ch()
1870 if (ns.conn >= C_CONNECTED && in after_state_ch()
1871 ((os.aftr_isp != ns.aftr_isp) || in after_state_ch()
1872 (os.user_isp != ns.user_isp))) in after_state_ch()
1873 drbd_send_state(peer_device, ns); in after_state_ch()
1877 (ns.aftr_isp || ns.peer_isp || ns.user_isp)) in after_state_ch()
1881 changes (ISP bits) while we were in WFReportParams. */ in after_state_ch()
1882 if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED) in after_state_ch()
1883 drbd_send_state(peer_device, ns); in after_state_ch()
1885 if (os.conn != C_AHEAD && ns.conn == C_AHEAD) in after_state_ch()
1886 drbd_send_state(peer_device, ns); in after_state_ch()
1888 /* We are in the process of starting a full sync... */ in after_state_ch()
1889 if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) || in after_state_ch()
1890 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S)) in after_state_ch()
1899 if (os.disk != D_FAILED && ns.disk == D_FAILED) { in after_state_ch()
1905 * we might come from a failed Attach before ldev was set. */ in after_state_ch()
1906 if (device->ldev) { in after_state_ch()
1908 eh = rcu_dereference(device->ldev->disk_conf)->on_io_error; in after_state_ch()
1911 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &device->flags); in after_state_ch()
1914 * See: 2932204 drbd: call local-io-error handler early in after_state_ch()
1915 * People may choose to hard-reset the box from this handler. in after_state_ch()
1918 drbd_khelper(device, "local-io-error"); in after_state_ch()
1922 * if this was a force-detach due to disk_timeout in after_state_ch()
1923 * or administrator request (drbdsetup detach --force). in after_state_ch()
1929 * have been re-used for other things. in after_state_ch()
1933 if (test_and_clear_bit(FORCE_DETACH, &device->flags)) in after_state_ch()
1939 if (device->state.disk != D_FAILED) in after_state_ch()
1942 drbd_disk_str(device->state.disk)); in after_state_ch()
1944 if (ns.conn >= C_CONNECTED) in after_state_ch()
1945 drbd_send_state(peer_device, ns); in after_state_ch()
1949 /* In case we want to get something to stable storage still, in after_state_ch()
1960 if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) { in after_state_ch()
1961 /* We must still be diskless, in after_state_ch()
1962 * re-attach has to be serialized with this! */ in after_state_ch()
1963 if (device->state.disk != D_DISKLESS) in after_state_ch()
1966 drbd_disk_str(device->state.disk)); in after_state_ch()
1968 if (ns.conn >= C_CONNECTED) in after_state_ch()
1969 drbd_send_state(peer_device, ns); in after_state_ch()
1976 if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED) in after_state_ch()
1977 drbd_send_state(peer_device, ns); in after_state_ch()
1980 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING && in after_state_ch()
1981 test_and_clear_bit(RESYNC_AFTER_NEG, &device->flags)) { in after_state_ch()
1982 if (ns.conn == C_CONNECTED) in after_state_ch()
1987 if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) || in after_state_ch()
1988 (os.peer_isp && !ns.peer_isp) || in after_state_ch()
1989 (os.user_isp && !ns.user_isp)) in after_state_ch()
1993 * it should (at least for non-empty resyncs) already know itself. */ in after_state_ch()
1994 if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED) in after_state_ch()
1995 drbd_send_state(peer_device, ns); in after_state_ch()
1998 * the stop sector, and we may even have changed the stop sector during in after_state_ch()
2000 if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED in after_state_ch()
2002 drbd_send_state(peer_device, ns); in after_state_ch()
2006 * failure, or on transition from resync back to AHEAD/BEHIND. in after_state_ch()
2010 * For resync aborted because of local disk failure, we cannot do in after_state_ch()
2016 (ns.conn == C_CONNECTED || ns.conn >= C_AHEAD) && get_ldev(device)) { in after_state_ch()
2023 if (ns.disk == D_DISKLESS && in after_state_ch()
2024 ns.conn == C_STANDALONE && in after_state_ch()
2025 ns.role == R_SECONDARY) { in after_state_ch()
2026 if (os.aftr_isp != ns.aftr_isp) in after_state_ch()
2047 struct drbd_connection *connection = acscw->connection; in w_after_conn_state_ch()
2048 enum drbd_conns oc = acscw->oc; in w_after_conn_state_ch()
2049 union drbd_state ns_max = acscw->ns_max; in w_after_conn_state_ch()
2053 broadcast_state_change(acscw->state_change); in w_after_conn_state_ch()
2054 forget_state_change(acscw->state_change); in w_after_conn_state_ch()
2057 /* Upon network configuration, we need to start the receiver */ in w_after_conn_state_ch()
2059 drbd_thread_start(&connection->receiver); in w_after_conn_state_ch()
2065 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) in w_after_conn_state_ch()
2071 mutex_lock(&connection->resource->conf_update); in w_after_conn_state_ch()
2072 old_conf = connection->net_conf; in w_after_conn_state_ch()
2073 connection->my_addr_len = 0; in w_after_conn_state_ch()
2074 connection->peer_addr_len = 0; in w_after_conn_state_ch()
2075 RCU_INIT_POINTER(connection->net_conf, NULL); in w_after_conn_state_ch()
2077 mutex_unlock(&connection->resource->conf_update); in w_after_conn_state_ch()
2086 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in w_after_conn_state_ch()
2087 struct drbd_device *device = peer_device->device; in w_after_conn_state_ch()
2088 if (test_bit(NEW_CUR_UUID, &device->flags)) { in w_after_conn_state_ch()
2090 clear_bit(NEW_CUR_UUID, &device->flags); in w_after_conn_state_ch()
2094 spin_lock_irq(&connection->resource->req_lock); in w_after_conn_state_ch()
2100 spin_unlock_irq(&connection->resource->req_lock); in w_after_conn_state_ch()
2104 kref_put(&connection->kref, drbd_destroy_connection); in w_after_conn_state_ch()
2117 .conn = connection->cstate, in conn_old_common_state()
2123 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in conn_old_common_state()
2124 struct drbd_device *device = peer_device->device; in conn_old_common_state()
2125 os = device->state; in conn_old_common_state()
2160 union drbd_state ns, os; in conn_is_valid_transition() local
2165 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in conn_is_valid_transition()
2166 struct drbd_device *device = peer_device->device; in conn_is_valid_transition()
2168 ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL); in conn_is_valid_transition()
2170 if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED) in conn_is_valid_transition()
2171 ns.disk = os.disk; in conn_is_valid_transition()
2173 if (ns.i == os.i) in conn_is_valid_transition()
2176 rv = is_valid_transition(os, ns); in conn_is_valid_transition()
2179 rv = is_valid_state(device, ns); in conn_is_valid_transition()
2182 rv = is_valid_soft_transition(os, ns, connection); in conn_is_valid_transition()
2184 rv = is_valid_soft_transition(os, ns, connection); in conn_is_valid_transition()
2189 print_st_err(device, os, ns, rv); in conn_is_valid_transition()
2202 union drbd_state ns, os, ns_max = { }; in conn_set_state() local
2216 * kill newly established sessions while we are still trying to thaw in conn_set_state()
2218 if (connection->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS) in conn_set_state()
2219 connection->last_reconnect_jif = jiffies; in conn_set_state()
2221 connection->cstate = val.conn; in conn_set_state()
2225 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in conn_set_state()
2226 struct drbd_device *device = peer_device->device; in conn_set_state()
2229 ns = apply_mask_val(os, mask, val); in conn_set_state()
2230 ns = sanitize_state(device, os, ns, NULL); in conn_set_state()
2232 if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED) in conn_set_state()
2233 ns.disk = os.disk; in conn_set_state()
2235 rv = _drbd_set_state(device, ns, flags, NULL); in conn_set_state()
2237 ns.i = device->state.i; in conn_set_state()
2238 ns_max.role = max_role(ns.role, ns_max.role); in conn_set_state()
2239 ns_max.peer = max_role(ns.peer, ns_max.peer); in conn_set_state()
2240 ns_max.conn = max_t(enum drbd_conns, ns.conn, ns_max.conn); in conn_set_state()
2241 ns_max.disk = max_t(enum drbd_disk_state, ns.disk, ns_max.disk); in conn_set_state()
2242 ns_max.pdsk = max_t(enum drbd_disk_state, ns.pdsk, ns_max.pdsk); in conn_set_state()
2244 ns_min.role = min_role(ns.role, ns_min.role); in conn_set_state()
2245 ns_min.peer = min_role(ns.peer, ns_min.peer); in conn_set_state()
2246 ns_min.conn = min_t(enum drbd_conns, ns.conn, ns_min.conn); in conn_set_state()
2247 ns_min.disk = min_t(enum drbd_disk_state, ns.disk, ns_min.disk); in conn_set_state()
2248 ns_min.pdsk = min_t(enum drbd_disk_state, ns.pdsk, ns_min.pdsk); in conn_set_state()
2262 ns_min.susp = ns_max.susp = connection->resource->susp; in conn_set_state()
2263 ns_min.susp_nod = ns_max.susp_nod = connection->resource->susp_nod; in conn_set_state()
2264 ns_min.susp_fen = ns_max.susp_fen = connection->resource->susp_fen; in conn_set_state()
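
conn_set_state() applies the requested change to each volume and then folds every device's resulting state into a connection-wide summary: ns_max keeps the "highest" and ns_min the "lowest" value per field, as the max_t/min_t lines above show. A simplified stand-alone version of that fold; the struct, the FOLD_* macros and the numbers are toy stand-ins.

#include <stdio.h>

struct toy_state { int role, conn, disk; };

/* The macros read ns, ns_max and ns_min from the enclosing scope, like the code above. */
#define FOLD_MAX(f) do { ns_max.f = ns.f > ns_max.f ? ns.f : ns_max.f; } while (0)
#define FOLD_MIN(f) do { ns_min.f = ns.f < ns_min.f ? ns.f : ns_min.f; } while (0)

int main(void)
{
	struct toy_state per_device[] = {
		{ .role = 1, .conn = 10, .disk = 8 },
		{ .role = 2, .conn = 9,  .disk = 4 },
	};
	struct toy_state ns_max = { 0, 0, 0 };
	struct toy_state ns_min = { 99, 99, 99 };

	for (unsigned i = 0; i < sizeof(per_device) / sizeof(per_device[0]); i++) {
		struct toy_state ns = per_device[i];

		FOLD_MAX(role); FOLD_MAX(conn); FOLD_MAX(disk);
		FOLD_MIN(role); FOLD_MIN(conn); FOLD_MIN(disk);
	}

	printf("max: %d %d %d  min: %d %d %d\n",
	       ns_max.role, ns_max.conn, ns_max.disk,
	       ns_min.role, ns_min.conn, ns_min.disk);
	return 0;
}
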
2275 if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &connection->flags)) in _conn_rq_cond()
2278 if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &connection->flags)) in _conn_rq_cond()
2282 if (err == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS) in _conn_rq_cond()
2294 enum drbd_conns oc = connection->cstate; in _conn_request_state()
2312 /* This will be a cluster-wide state change. in _conn_request_state()
2315 spin_unlock_irq(&connection->resource->req_lock); in _conn_request_state()
2316 mutex_lock(&connection->cstate_mutex); in _conn_request_state()
2319 set_bit(CONN_WD_ST_CHG_REQ, &connection->flags); in _conn_request_state()
2322 clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags); in _conn_request_state()
2324 /* need to re-acquire the spin lock, though */ in _conn_request_state()
2329 set_bit(DISCONNECT_SENT, &connection->flags); in _conn_request_state()
2331 /* ... and re-acquire the spinlock. in _conn_request_state()
2332 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call in _conn_request_state()
2334 spin_lock_irq(&connection->resource->req_lock); in _conn_request_state()
2335 wait_event_lock_irq(connection->ping_wait, in _conn_request_state()
2337 connection->resource->req_lock); in _conn_request_state()
2338 clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags); in _conn_request_state()
2343 state_change = remember_old_state(connection->resource, GFP_ATOMIC); in _conn_request_state()
2352 acscw->oc = os.conn; in _conn_request_state()
2353 acscw->ns_min = ns_min; in _conn_request_state()
2354 acscw->ns_max = ns_max; in _conn_request_state()
2355 acscw->flags = flags; in _conn_request_state()
2356 acscw->w.cb = w_after_conn_state_ch; in _conn_request_state()
2357 kref_get(&connection->kref); in _conn_request_state()
2358 acscw->connection = connection; in _conn_request_state()
2359 acscw->state_change = state_change; in _conn_request_state()
2360 drbd_queue_work(&connection->sender_work, &acscw->w); in _conn_request_state()
2368 * so give up the spinlock, then re-acquire it */ in _conn_request_state()
2369 spin_unlock_irq(&connection->resource->req_lock); in _conn_request_state()
2371 mutex_unlock(&connection->cstate_mutex); in _conn_request_state()
2372 spin_lock_irq(&connection->resource->req_lock); in _conn_request_state()
2388 spin_lock_irq(&connection->resource->req_lock); in conn_request_state()
2390 spin_unlock_irq(&connection->resource->req_lock); in conn_request_state()