// SPDX-License-Identifier: GPL-2.0-only
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"
#include <linux/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices_done(struct netlink_callback *cb);
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_connections_done(struct netlink_callback *cb);
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>

static int drbd_pre_doit(const struct genl_split_ops *ops,
			 struct sk_buff *skb, struct genl_info *info);
static void drbd_post_doit(const struct genl_split_ops *ops,
			   struct sk_buff *skb, struct genl_info *info);

#define GENL_MAGIC_FAMILY_PRE_DOIT	drbd_pre_doit
#define GENL_MAGIC_FAMILY_POST_DOIT	drbd_post_doit

#include <linux/genl_magic_func.h>

static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */

DEFINE_MUTEX(notification_mutex);

/* used by bdev_file_open_by_path() to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		pr_err("error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail is lack of space in the skb, and there are 4k available. */
static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	}
	nla_nest_end(skb, nla);
	return 0;
}

__printf(2, 3)
static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
{
	va_list args;
	struct nlattr *nla, *txt;
	int err = -EMSGSIZE;
	int len;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	txt = nla_reserve(skb, T_info_text, 256);
	if (!txt) {
		nla_nest_cancel(skb, nla);
		return err;
	}
	va_start(args, fmt);
	len = vscnprintf(nla_data(txt), 256, fmt, args);
	va_end(args);

	/* maybe: retry with larger reserve, if truncated */
	txt->nla_len = nla_attr_size(len + 1);
	nlmsg_trim(skb, (char *)txt + NLA_ALIGN(txt->nla_len));
	nla_nest_end(skb, nla);

	return 0;
}

/* Flags for drbd_adm_prepare() */
#define DRBD_ADM_NEED_MINOR		(1 << 0)
#define DRBD_ADM_NEED_RESOURCE		(1 << 1)
#define DRBD_ADM_NEED_CONNECTION	(1 << 2)

/* Per-command flags for drbd_pre_doit() */
static const unsigned int drbd_genl_cmd_flags[] = {
	[DRBD_ADM_GET_STATUS]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_NEW_MINOR]		= DRBD_ADM_NEED_RESOURCE,
	[DRBD_ADM_DEL_MINOR]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_NEW_RESOURCE]		= 0,
	[DRBD_ADM_DEL_RESOURCE]		= DRBD_ADM_NEED_RESOURCE,
	[DRBD_ADM_RESOURCE_OPTS]	= DRBD_ADM_NEED_RESOURCE,
	[DRBD_ADM_CONNECT]		= DRBD_ADM_NEED_RESOURCE,
	[DRBD_ADM_CHG_NET_OPTS]		= DRBD_ADM_NEED_CONNECTION,
	[DRBD_ADM_DISCONNECT]		= DRBD_ADM_NEED_CONNECTION,
	[DRBD_ADM_ATTACH]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_CHG_DISK_OPTS]	= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_RESIZE]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_PRIMARY]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_SECONDARY]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_NEW_C_UUID]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_START_OV]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_DETACH]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_INVALIDATE]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_INVAL_PEER]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_PAUSE_SYNC]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_RESUME_SYNC]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_SUSPEND_IO]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_RESUME_IO]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_OUTDATE]		= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_GET_TIMEOUT_TYPE]	= DRBD_ADM_NEED_MINOR,
	[DRBD_ADM_DOWN]			= DRBD_ADM_NEED_RESOURCE,
};
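
/* Entries in this table default to 0, i.e. no context object required.
 * drbd_pre_doit() bounds-checks the lookup ("cmd < ARRAY_SIZE(...)"), so a
 * command without an entry simply carries no DRBD_ADM_NEED_* requirement. */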

/*
 * At this point, we still rely on the global genl_lock().
 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
 * to add additional synchronization against object destruction/modification.
 */
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
	struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
	struct drbd_genlmsghdr *d_in = genl_info_userhdr(info);
	const u8 cmd = info->genlhdr->cmd;
	int err;

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
		return -EPERM;

	adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx->reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* Putting a few bytes into a fresh skb of >= 4k will always succeed.
	 * But be defensive anyway. */
	if (!adm_ctx->reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh->minor = d_in->minor;
	adm_ctx->reply_dh->ret_code = NO_ERROR;

	adm_ctx->volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx->reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the adm_ctx */
		nla = nested_attr_tb[T_ctx_volume];
		if (nla)
			adm_ctx->volume = nla_get_u32(nla);
		nla = nested_attr_tb[T_ctx_resource_name];
		if (nla)
			adm_ctx->resource_name = nla_data(nla);
		adm_ctx->my_addr = nested_attr_tb[T_ctx_my_addr];
		adm_ctx->peer_addr = nested_attr_tb[T_ctx_peer_addr];
		if ((adm_ctx->my_addr &&
		     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
		    (adm_ctx->peer_addr &&
		     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx->minor = d_in->minor;
	adm_ctx->device = minor_to_device(d_in->minor);

	/* We are protected by the global genl_lock().
	 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
	 * so make sure this object stays around. */
	if (adm_ctx->device)
		kref_get(&adm_ctx->device->kref);

	if (adm_ctx->resource_name)
		adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);

	if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
		if (adm_ctx->resource_name)
			return ERR_RES_NOT_KNOWN;
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx->resource) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->device) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->my_addr && adm_ctx->peer_addr)
			adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
							  nla_len(adm_ctx->my_addr),
							  nla_data(adm_ctx->peer_addr),
							  nla_len(adm_ctx->peer_addr));
		if (!adm_ctx->connection) {
			drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx->device && adm_ctx->resource &&
	    adm_ctx->device->resource != adm_ctx->resource) {
		pr_warn("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
			adm_ctx->minor, adm_ctx->resource->name,
			adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx->device &&
	    adm_ctx->volume != VOLUME_UNSPECIFIED &&
	    adm_ctx->volume != adm_ctx->device->vnr) {
		pr_warn("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			adm_ctx->minor, adm_ctx->volume,
			adm_ctx->device->vnr, adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	/* still, provide adm_ctx->resource always, if possible. */
	if (!adm_ctx->resource) {
		adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
			: adm_ctx->connection ? adm_ctx->connection->resource : NULL;
		if (adm_ctx->resource)
			kref_get(&adm_ctx->resource->kref);
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx->reply_skb);
	adm_ctx->reply_skb = NULL;
	return err;
}
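
/* Return convention of drbd_adm_prepare(), as relied upon by drbd_pre_doit()
 * below: NO_ERROR on success; a positive enum drbd_ret_code when the request
 * was parsed but cannot be satisfied (reply_skb is kept, so the error can be
 * reported back to userspace); a negative errno on hard failure, in which
 * case reply_skb has been freed and set to NULL. */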

static int drbd_pre_doit(const struct genl_split_ops *ops,
			 struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx;
	u8 cmd = info->genlhdr->cmd;
	unsigned int flags;
	int err;

	adm_ctx = kzalloc(sizeof(*adm_ctx), GFP_KERNEL);
	if (!adm_ctx)
		return -ENOMEM;

	flags = (cmd < ARRAY_SIZE(drbd_genl_cmd_flags))
		? drbd_genl_cmd_flags[cmd] : 0;

	err = drbd_adm_prepare(adm_ctx, skb, info, flags);
	if (err && !adm_ctx->reply_skb) {
		/* Fatal error before reply_skb was allocated. */
		kfree(adm_ctx);
		return err;
	}
	if (err)
		adm_ctx->reply_dh->ret_code = err;

	info->user_ptr[0] = adm_ctx;
	return 0;
}

static void drbd_post_doit(const struct genl_split_ops *ops,
			   struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];

	if (!adm_ctx)
		return;

	if (adm_ctx->reply_skb)
		drbd_adm_send_reply(adm_ctx->reply_skb, info);

	if (adm_ctx->device) {
		kref_put(&adm_ctx->device->kref, drbd_destroy_device);
		adm_ctx->device = NULL;
	}
	if (adm_ctx->connection) {
		kref_put(&adm_ctx->connection->kref, drbd_destroy_connection);
		adm_ctx->connection = NULL;
	}
	if (adm_ctx->resource) {
		kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
		adm_ctx->resource = NULL;
	}

	kfree(adm_ctx);
}

static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
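
/* Note: setup_khelper_env() writes into envp[3] and envp[4]; callers must
 * pass an environment where those slots are writable buffers of at least
 * 20 resp. 60 bytes, as drbd_khelper() and conn_khelper() below do with
 * their (char[20]){ } / (char[60]){ } compound literals. */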

int drbd_khelper(struct drbd_device *device, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[14];
	char *argv[] = {drbd_usermode_helper, cmd, mb, NULL };
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct sib_info sib;
	int ret;

	if (current == connection->worker.task)
		set_bit(CALLBACK_PENDING, &connection->flags);

	snprintf(mb, 14, "minor-%d", device_to_minor(device));
	setup_khelper_env(connection, envp);

	/* The helper may take some time.
	 * Write out any unsynced meta data changes now. */
	drbd_md_sync(device);

	drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, mb,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, mb,
			  (ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);

	if (current == connection->worker.task)
		clear_bit(CALLBACK_PENDING, &connection->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
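
/* The helper's exit status comes back wait()-style from
 * call_usermodehelper(..., UMH_WAIT_PROC); hence the "(ret >> 8) & 0xff"
 * above (and in conn_khelper()/conn_try_outdate_peer() below) to extract
 * the actual exit code. */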

static int conn_khelper(struct drbd_connection *connection, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *resource_name = connection->resource->name;
	char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL };
	int ret;

	setup_khelper_env(connection, envp);
	conn_md_sync(connection);

	drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);

	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		if (get_ldev_if_state(device, D_CONSISTENT)) {
			struct disk_conf *disk_conf =
				rcu_dereference(peer_device->device->ldev->disk_conf);
			fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
			put_ldev(device);
		}
	}
	rcu_read_unlock();

	return fp;
}
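
/* enum drbd_fencing_p is ordered from least to most strict, so the max_t()
 * above yields the strictest fencing policy configured on any volume of
 * this connection. */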

static bool resource_is_suspended(struct drbd_resource *resource)
{
	return resource->susp || resource->susp_fen || resource->susp_nod;
}

bool conn_try_outdate_peer(struct drbd_connection *connection)
{
	struct drbd_resource * const resource = connection->resource;
	unsigned int connect_cnt;
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	spin_lock_irq(&resource->req_lock);
	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
		spin_unlock_irq(&resource->req_lock);
		return false;
	}

	connect_cnt = connection->connect_cnt;
	spin_unlock_irq(&resource->req_lock);

	fp = highest_fencing_policy(connection);
	switch (fp) {
	case FP_NOT_AVAIL:
		drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
		spin_lock_irq(&resource->req_lock);
		if (connection->cstate < C_WF_REPORT_PARAMS) {
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE | CS_HARD | CS_DC_SUSP);
			/* We are no longer suspended due to the fencing policy.
			 * We may still be suspended due to the on-no-data-accessible policy.
			 * If that was OND_IO_ERROR, fail pending requests. */
			if (!resource_is_suspended(resource))
				_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
		}
		/* Else: in case we raced with a connection handshake,
		 * let the handshake figure out if we maybe can RESEND,
		 * and do not resume/fail pending requests here.
		 * Worst case is we stay suspended for now, which may be
		 * resolved by either re-establishing the replication link, or
		 * the next link failure, or eventually the administrator. */
		spin_unlock_irq(&resource->req_lock);
		return false;

	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(connection, "fence-peer");

	switch ((r>>8) & 0xff) {
	case P_INCONSISTENT: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case P_OUTDATED: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case P_DOWN: /* peer was down */
		if (conn_highest_disk(connection) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		drbd_warn(connection, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case P_FENCING:
		/* THINK: do we need to handle this
		 * like case 4, or more like case 5? */
		if (fp != FP_STONITH)
			drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	drbd_info(connection, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

	/* Not using
	   conn_request_state(connection, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&resource->req_lock);
	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
		if (connection->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
			   while the fence-peer handler was running, ignore it */
			drbd_info(connection, "Ignoring fence-peer exit code\n");
		else
			_conn_request_state(connection, mask, val, CS_VERBOSE);
	}
	spin_unlock_irq(&resource->req_lock);

	return conn_highest_pdsk(connection) <= D_OUTDATED;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_connection *connection = (struct drbd_connection *)data;

	conn_try_outdate_peer(connection);

	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
	struct task_struct *opa;

	kref_get(&connection->kref);
	/* We may have just sent a signal to this thread
	 * to get it out of some blocking network function.
	 * Clear signals; otherwise kthread_run(), which internally uses
	 * wait_on_completion_killable(), will mistake our pending signal
	 * for a new fatal signal and fail. */
	flush_signals(current);
	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
	if (IS_ERR(opa)) {
		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&connection->kref, drbd_destroy_connection);
	}
}

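/* Retry strategy of drbd_set_role(), in short: attempt the role change up
 * to max_tries times, escalating between attempts -- drop a failed
 * peer-disk demand, force the local disk UpToDate (if "force" was given),
 * or try to fence/outdate the peer -- and for SS_TWO_PRIMARIES wait
 * roughly one ping-timeout before the final retry. */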
enum drbd_state_rv
drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
{
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY) {
		struct drbd_connection *connection;

		/* Detect dead peers as soon as possible. */

		rcu_read_lock();
		for_each_connection(connection, device->resource)
			request_ping(connection);
		rcu_read_unlock();
	}

	mutex_lock(device->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (device->state.disk < D_UP_TO_DATE &&
		     device->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(connection)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(connection) && force) {
				drbd_warn(device, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			if (try < max_tries) {
				int timeo;

				try = max_tries - 1;
				rcu_read_lock();
				nc = rcu_dereference(connection->net_conf);
				timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
				rcu_read_unlock();
				schedule_timeout_interruptible(timeo);
			}
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(device, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		drbd_warn(device, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		if (get_ldev(device)) {
			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(device);
		}
	} else {
		mutex_lock(&device->resource->conf_update);
		nc = connection->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&device->resource->conf_update);

		if (get_ldev(device)) {
			if (((device->state.conn < C_CONNECTED ||
			       device->state.pdsk <= D_FAILED)
			      && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(device);

			device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(device);
		}
	}

	/* Writeout of activity-log-covered areas of the bitmap to stable
	 * storage is already done in the after-state-change handling. */

	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(peer_device);
		drbd_send_current_state(peer_device);
	}

	drbd_md_sync(device);
	set_disk_ro(device->vdisk, new_role == R_SECONDARY);
	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(device->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;
	enum drbd_state_rv rv;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}
	genl_unlock();
	mutex_lock(&adm_ctx->resource->adm_mutex);

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		rv = drbd_set_role(adm_ctx->device, R_PRIMARY, parms.assume_uptodate);
	else
		rv = drbd_set_role(adm_ctx->device, R_SECONDARY, 0);

	mutex_unlock(&adm_ctx->resource->adm_mutex);
	genl_lock();
	adm_ctx->reply_dh->ret_code = rv;
	return 0;
out:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 * Activity log size used to be fixed 32kB,
 * but is about to become configurable.
 */
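/* Rough worked example for the "internal" case, assuming the usual DRBD
 * constants (one bitmap bit per 4 KiB of data, i.e. one 512-byte bitmap
 * sector per 16 MiB extent): a 1 TiB backing device has 2^31 sectors,
 * which needs 2^16 bitmap sectors = 32 MiB of bitmap, plus the 4 KiB
 * superblock and the activity log. */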
static void drbd_md_set_sector_offsets(struct drbd_device *device,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	unsigned int al_size_sect = bdev->md.al_size_4k * 8;

	bdev->md.md_offset = drbd_md_ss(bdev);

	switch (bdev->md.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_128MB_SECT;
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* al size is still fixed */
		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) ~ this many bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_4kB_SECT + al_size_sect;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
		break;
	}
}

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;

	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
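
/* For example (input in KiB): ppsize(buf, 16384) yields "16 MB", and
 * ppsize(buf, 16777216), i.e. 16 GiB, yields "16 GB". */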

/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes a device flag, is triggered by drbd internals,
 * and should be short-lived. */
/* It needs to be a counter, since multiple threads might
   independently suspend and resume IO. */
void drbd_suspend_io(struct drbd_device *device)
{
	atomic_inc(&device->suspend_cnt);
	if (drbd_suspended(device))
		return;
	wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_device *device)
{
	if (atomic_dec_and_test(&device->suspend_cnt))
		wake_up(&device->misc_wait);
}

/*
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @device:	DRBD device.
 *
 * Returns a non-negative enum determine_dev_size on success; negative values
 * indicate errors. You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
	struct md_offsets_and_sizes {
		u64 last_agreed_sect;
		u64 md_offset;
		s32 al_offset;
		s32 bm_offset;
		u32 md_size_sect;

		u32 al_stripes;
		u32 al_stripe_size_4k;
	} prev;
	sector_t u_size, size;
	struct drbd_md *md = &device->ldev->md;
	void *buffer;

	int md_moved, la_size_changed;
	enum determine_dev_size rv = DS_UNCHANGED;

	/* We may change the on-disk offsets of our meta data below. Lock out
	 * anything that may cause meta data IO, to avoid acting on incomplete
	 * layout changes or scribbling over meta data that is in the process
	 * of being moved.
	 *
	 * Move is not exactly correct, btw, currently we have all our meta
	 * data in core memory, to "move" it we just write it all out, there
	 * are no reads. */
	drbd_suspend_io(device);
	buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
	if (!buffer) {
		drbd_resume_io(device);
		return DS_ERROR;
	}

	/* remember current offset and sizes */
	prev.last_agreed_sect = md->la_size_sect;
	prev.md_offset = md->md_offset;
	prev.al_offset = md->al_offset;
	prev.bm_offset = md->bm_offset;
	prev.md_size_sect = md->md_size_sect;
	prev.al_stripes = md->al_stripes;
	prev.al_stripe_size_4k = md->al_stripe_size_4k;

	if (rs) {
		/* rs is non NULL if we should change the AL layout only */
		md->al_stripes = rs->al_stripes;
		md->al_stripe_size_4k = rs->al_stripe_size / 4;
		md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
	}

	drbd_md_set_sector_offsets(device, device->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);

	if (size < prev.last_agreed_sect) {
		if (rs && u_size == 0) {
			/* Remove "rs &&" later. This check should always be active, but
			   right now the receiver expects the permissive behavior */
			drbd_warn(device, "Implicit shrink not allowed. "
				 "Use --size=%llus for explicit shrink.\n",
				 (unsigned long long)size);
			rv = DS_ERROR_SHRINK;
		}
		if (u_size > size)
			rv = DS_ERROR_SPACE_MD;
		if (rv != DS_UNCHANGED)
			goto err_out;
	}

	if (get_capacity(device->vdisk) != size ||
	    drbd_bm_capacity(device) != size) {
		int err;

		err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(device);
			if (size == 0) {
				drbd_err(device, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				drbd_err(device, "BM resizing failed. "
				    "Leaving size unchanged\n");
			}
			rv = DS_ERROR;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(device, size);
		md->la_size_sect = size;
	}
	if (rv <= DS_ERROR)
		goto err_out;

	la_size_changed = (prev.last_agreed_sect != md->la_size_sect);

	md_moved = prev.md_offset    != md->md_offset
		|| prev.md_size_sect != md->md_size_sect;

	if (la_size_changed || md_moved || rs) {
		u32 prev_flags;

		/* We do some synchronous IO below, which may take some time.
		 * Clear the timer, to avoid scary "timer expired!" messages,
		 * "Superblock" is written out at least twice below, anyways. */
		timer_delete(&device->md_sync_timer);

		/* We won't change the "al-extents" setting, we just may need
		 * to move the on-disk location of the activity log ringbuffer.
		 * Lock for transaction is good enough, it may well be "dirty"
		 * or even "starving". */
		wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log));

		/* mark current on-disk bitmap and activity log as unreliable */
		prev_flags = md->flags;
		md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
		drbd_md_write(device, buffer);

		drbd_al_initialize(device, buffer);

		drbd_info(device, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
			       "size changed", BM_LOCKED_MASK, NULL);

		/* on-disk bitmap and activity log is authoritative again
		 * (unless there was an IO error meanwhile...) */
		md->flags = prev_flags;
		drbd_md_write(device, buffer);

		if (rs)
			drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
				  md->al_stripes, md->al_stripe_size_4k * 4);
	}

	if (size > prev.last_agreed_sect)
		rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO;
	if (size < prev.last_agreed_sect)
		rv = DS_SHRUNK;

	if (0) {
	err_out:
		/* restore previous offset and sizes */
		md->la_size_sect = prev.last_agreed_sect;
		md->md_offset = prev.md_offset;
		md->al_offset = prev.al_offset;
		md->bm_offset = prev.bm_offset;
		md->md_size_sect = prev.md_size_sect;
		md->al_stripes = prev.al_stripes;
		md->al_stripe_size_4k = prev.al_stripe_size_4k;
		md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
	}
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
	drbd_md_put_buffer(device);
	drbd_resume_io(device);

	return rv;
}

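/* Pick a device size honoring, in order: the smaller of local and peer size
 * when both are known; otherwise the last agreed size, capped by whichever
 * of the two is known; else whichever single size is known. An explicit
 * user-requested size is applied last and may only shrink the result; a
 * larger request is rejected with an error message and ignored. */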
sector_t
drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = device->p_size;   /* partner's disk size. */
	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
		drbd_warn(device, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size_sect) {
			size = la_size_sect;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		drbd_err(device, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}

/*
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @device:	DRBD device.
 *
 * Returns -EBUSY if the current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after calling
 * this function.
 */
static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = device->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		drbd_err(device, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&device->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				drbd_err(device, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		device->act_log = n;
	spin_unlock_irq(&device->al_lock);
	if (in_use) {
		drbd_err(device, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	}
	lc_destroy(t);

	drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
	return 0;
}

static unsigned int drbd_max_peer_bio_size(struct drbd_device *device)
{
	/*
	 * We may ignore peer limits if the peer is modern enough. From 8.3.8
	 * onwards the peer can use multiple BIOs for a single peer_request.
	 */
	if (device->state.conn < C_WF_REPORT_PARAMS)
		return device->peer_max_bio_size;

	if (first_peer_device(device)->connection->agreed_pro_version < 94)
		return min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);

	/*
	 * Correct old drbd (up to 8.3.7) if it believes it can do more than
	 * 32KiB.
	 */
	if (first_peer_device(device)->connection->agreed_pro_version == 94)
		return DRBD_MAX_SIZE_H80_PACKET;

	/*
	 * drbd 8.3.8 onwards, before 8.4.0
	 */
	if (first_peer_device(device)->connection->agreed_pro_version < 100)
		return DRBD_MAX_BIO_SIZE_P95;
	return DRBD_MAX_BIO_SIZE;
}
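
/* Assuming the usual definitions in drbd_int.h (DRBD_MAX_SIZE_H80_PACKET =
 * 32 KiB, DRBD_MAX_BIO_SIZE_P95 = 128 KiB, DRBD_MAX_BIO_SIZE = 1 MiB), the
 * mapping above works out to: protocol < 94 -> at most 32 KiB, == 94 ->
 * exactly 32 KiB, 95..99 -> 128 KiB, >= 100 -> 1 MiB. */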

static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
{
	/* when we introduced REQ_WRITE_SAME support, we also bumped
	 * our maximum supported batch bio size used for discards. */
	if (connection->agreed_features & DRBD_FF_WSAME)
		return DRBD_MAX_BBIO_SECTORS;
	/* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
	return AL_EXTENT_SIZE >> 9;
}

static bool drbd_discard_supported(struct drbd_connection *connection,
		struct drbd_backing_dev *bdev)
{
	if (bdev && !bdev_max_discard_sectors(bdev->backing_bdev))
		return false;

	if (connection->cstate >= C_CONNECTED &&
	    !(connection->agreed_features & DRBD_FF_TRIM)) {
		drbd_info(connection,
			"peer DRBD too old, does not support TRIM: disabling discards\n");
		return false;
	}

	return true;
}

/* This is the workaround for "bio would need to, but cannot, be split" */
static unsigned int drbd_backing_dev_max_segments(struct drbd_device *device)
{
	unsigned int max_segments;

	rcu_read_lock();
	max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
	rcu_read_unlock();

	if (!max_segments)
		return BLK_MAX_SEGMENTS;
	return max_segments;
}

void drbd_reconsider_queue_parameters(struct drbd_device *device,
		struct drbd_backing_dev *bdev, struct o_qlim *o)
{
	struct drbd_connection *connection =
		first_peer_device(device)->connection;
	struct request_queue * const q = device->rq_queue;
	unsigned int now = queue_max_hw_sectors(q) << 9;
	struct queue_limits lim;
	struct request_queue *b = NULL;
	unsigned int new;

	if (bdev) {
		b = bdev->backing_bdev->bd_disk->queue;

		device->local_max_bio_size =
			queue_max_hw_sectors(b) << SECTOR_SHIFT;
	}

	/*
	 * We may later detach and re-attach on a disconnected Primary. Avoid
	 * decreasing the value in this case.
	 *
	 * We want to store what we know the peer DRBD can handle, not what the
	 * peer IO backend can handle.
	 */
	new = min3(DRBD_MAX_BIO_SIZE, device->local_max_bio_size,
		   max(drbd_max_peer_bio_size(device), device->peer_max_bio_size));
	if (new != now) {
		if (device->state.role == R_PRIMARY && new < now)
			drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n",
				 new, now);
		drbd_info(device, "max BIO size = %u\n", new);
	}

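	/*
	 * queue_limits_start_update() hands us a copy of the queue's current
	 * limits to modify; queue_limits_commit_update() below validates the
	 * modified set and applies it atomically.
	 */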
	lim = queue_limits_start_update(q);
	if (bdev) {
		blk_set_stacking_limits(&lim);
		lim.max_segments = drbd_backing_dev_max_segments(device);
	} else {
		lim.max_segments = BLK_MAX_SEGMENTS;
		lim.features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA |
			BLK_FEAT_ROTATIONAL | BLK_FEAT_STABLE_WRITES;
	}

	lim.max_hw_sectors = new >> SECTOR_SHIFT;
	lim.seg_boundary_mask = PAGE_SIZE - 1;

	/*
	 * We don't care for the granularity, really.
	 *
	 * Stacking limits below should fix it for the local device. Whether or
	 * not it is a suitable granularity on the remote device is not our
	 * problem, really. If you care, you need to use devices with similar
	 * topology on all peers.
	 */
	if (drbd_discard_supported(connection, bdev)) {
		lim.discard_granularity = 512;
		lim.max_hw_discard_sectors =
			drbd_max_discard_sectors(connection);
	} else {
		lim.discard_granularity = 0;
		lim.max_hw_discard_sectors = 0;
	}

	if (bdev) {
		blk_stack_limits(&lim, &b->limits, 0);
		/*
		 * blk_set_stacking_limits() cleared the features, and
		 * blk_stack_limits() may or may not have inherited
		 * BLK_FEAT_STABLE_WRITES from the backing device.
		 *
		 * DRBD always requires stable writes because:
		 * 1. The same bio data is read for both local disk I/O and
		 *    network transmission. If the page changes mid-flight,
		 *    the local and remote copies could diverge.
		 * 2. When data integrity is enabled, DRBD calculates a
		 *    checksum before sending the data. If the page changes
		 *    between checksum calculation and transmission, the
		 *    receiver will detect a checksum mismatch.
		 */
		lim.features |= BLK_FEAT_STABLE_WRITES;
	}

	/*
	 * If we can handle "zeroes" efficiently on the protocol, we want to do
	 * that, even if our backend does not announce max_write_zeroes_sectors
	 * itself.
	 */
	if (connection->agreed_features & DRBD_FF_WZEROES)
		lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
	else
		lim.max_write_zeroes_sectors = 0;
	lim.max_hw_wzeroes_unmap_sectors = 0;

	if ((lim.discard_granularity >> SECTOR_SHIFT) >
	    lim.max_hw_discard_sectors) {
		lim.discard_granularity = 0;
		lim.max_hw_discard_sectors = 0;
	}

	if (queue_limits_commit_update(q, &lim))
		drbd_err(device, "setting new queue limits failed\n");
}

/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_connection *connection)
{
	drbd_thread_start(&connection->worker);
	drbd_flush_workqueue(&connection->sender_work);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_connection *connection)
{
	bool stop_threads;

	spin_lock_irq(&connection->resource->req_lock);
	stop_threads = conn_all_vols_unconf(connection) &&
		connection->cstate == C_STANDALONE;
	spin_unlock_irq(&connection->resource->req_lock);
	if (stop_threads) {
		/* ack_receiver thread and ack_sender workqueue are implicitly
		 * stopped by receiver in conn_disconnect() */
		drbd_thread_stop(&connection->receiver);
		drbd_thread_stop(&connection->worker);
	}
}

/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_device *device)
{
	int s = 0;

	if (!lc_try_lock(device->act_log)) {
		drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(device);
	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
	spin_unlock_irq(&device->resource->req_lock);
	lc_unlock(device->act_log);

	if (s)
		drbd_info(device, "Suspended AL updates\n");
}


static bool should_set_defaults(struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = genl_info_userhdr(info);

	return 0 != (dh->flags & DRBD_GENL_F_SET_DEFAULTS);
}

static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
	/* This is limited by 16 bit "slot" numbers,
	 * and by available on-disk context storage.
	 *
	 * Also (u16)~0 is special (denotes a "free" extent).
	 *
	 * One transaction occupies one 4kB on-disk block,
	 * we have n such blocks in the on disk ring buffer,
	 * the "current" transaction may fail (n-1),
	 * and there are 919 context slots per transaction.
	 *
	 * 72 transaction blocks amounts to more than 2**16 context slots,
	 * so cap there first.
	 */
	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
	const unsigned int sufficient_on_disk =
		(max_al_nr + AL_CONTEXT_PER_TRANSACTION - 1)
		/ AL_CONTEXT_PER_TRANSACTION;

	unsigned int al_size_4k = bdev->md.al_size_4k;

	if (al_size_4k > sufficient_on_disk)
		return max_al_nr;

	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}
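
/* Worked numbers for the cap above, assuming AL_CONTEXT_PER_TRANSACTION is
 * 919 and DRBD_AL_EXTENTS_MAX is 65534: sufficient_on_disk comes out as
 * (65534 + 918) / 919 = 72 blocks, matching the "72 transaction blocks"
 * remark in the comment. */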
1506
write_ordering_changed(struct disk_conf * a,struct disk_conf * b)1507 static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
1508 {
1509 return a->disk_barrier != b->disk_barrier ||
1510 a->disk_flushes != b->disk_flushes ||
1511 a->disk_drain != b->disk_drain;
1512 }
1513
sanitize_disk_conf(struct drbd_device * device,struct disk_conf * disk_conf,struct drbd_backing_dev * nbc)1514 static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
1515 struct drbd_backing_dev *nbc)
1516 {
1517 struct block_device *bdev = nbc->backing_bdev;
1518
1519 if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
1520 disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
1521 if (disk_conf->al_extents > drbd_al_extents_max(nbc))
1522 disk_conf->al_extents = drbd_al_extents_max(nbc);
1523
1524 if (!bdev_max_discard_sectors(bdev)) {
1525 if (disk_conf->rs_discard_granularity) {
1526 disk_conf->rs_discard_granularity = 0; /* disable feature */
1527 drbd_info(device, "rs_discard_granularity feature disabled\n");
1528 }
1529 }
1530
1531 if (disk_conf->rs_discard_granularity) {
1532 int orig_value = disk_conf->rs_discard_granularity;
1533 sector_t discard_size = bdev_max_discard_sectors(bdev) << 9;
1534 unsigned int discard_granularity = bdev_discard_granularity(bdev);
1535 int remainder;
1536
1537 if (discard_granularity > disk_conf->rs_discard_granularity)
1538 disk_conf->rs_discard_granularity = discard_granularity;
1539
1540 remainder = disk_conf->rs_discard_granularity %
1541 discard_granularity;
1542 disk_conf->rs_discard_granularity += remainder;
1543
1544 if (disk_conf->rs_discard_granularity > discard_size)
1545 disk_conf->rs_discard_granularity = discard_size;
1546
1547 if (disk_conf->rs_discard_granularity != orig_value)
1548 drbd_info(device, "rs_discard_granularity changed to %d\n",
1549 disk_conf->rs_discard_granularity);
1550 }
1551 }
1552
disk_opts_check_al_size(struct drbd_device * device,struct disk_conf * dc)1553 static int disk_opts_check_al_size(struct drbd_device *device, struct disk_conf *dc)
1554 {
1555 int err = -EBUSY;
1556
1557 if (device->act_log &&
1558 device->act_log->nr_elements == dc->al_extents)
1559 return 0;
1560
1561 drbd_suspend_io(device);
1562 /* If IO completion is currently blocked, we would likely wait
1563 * "forever" for the activity log to become unused. So we don't. */
1564 if (atomic_read(&device->ap_bio_cnt))
1565 goto out;
1566
1567 wait_event(device->al_wait, lc_try_lock(device->act_log));
1568 drbd_al_shrink(device);
1569 err = drbd_check_al_size(device, dc);
1570 lc_unlock(device->act_log);
1571 wake_up(&device->al_wait);
1572 out:
1573 drbd_resume_io(device);
1574 return err;
1575 }
1576
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	enum drbd_ret_code retcode;
	struct drbd_device *device;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err;
	unsigned int fifo_size;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto finish;

	device = adm_ctx->device;
	mutex_lock(&adm_ctx->resource->adm_mutex);

	/* we also need a disk to change the options on */
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc_obj(struct disk_conf);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&device->resource->conf_update);
	old_disk_conf = device->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
		goto fail_unlock;
	}

	if (!expect(device, new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	sanitize_disk_conf(device, new_disk_conf, device->ldev);

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != device->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			drbd_err(device, "kmalloc of fifo_buffer failed\n");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	err = disk_opts_check_al_size(device, new_disk_conf);
	if (err) {
		/* Could be just "busy". Ignore?
		 * Introduce dedicated error code? */
		drbd_msg_put_info(adm_ctx->reply_skb,
				  "Try again without changing current al-extents setting");
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	lock_all_resources();
	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(device);
	}
	unlock_all_resources();

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = device->rs_plan_s;
		rcu_assign_pointer(device->rs_plan_s, new_plan);
	}

	mutex_unlock(&device->resource->conf_update);

	if (new_disk_conf->al_updates)
		device->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		device->ldev->md.flags |= MDF_AL_DISABLED;

	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &device->flags);
	else
		set_bit(MD_NO_FUA, &device->flags);

	if (write_ordering_changed(old_disk_conf, new_disk_conf))
		drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);

	if (old_disk_conf->discard_zeroes_if_aligned !=
	    new_disk_conf->discard_zeroes_if_aligned)
		drbd_reconsider_queue_parameters(device, device->ldev, NULL);

	drbd_md_sync(device);

	if (device->state.conn >= C_CONNECTED) {
		struct drbd_peer_device *peer_device;

		for_each_peer_device(peer_device, device)
			drbd_send_sync_param(peer_device);
	}

	kvfree_rcu_mightsleep(old_disk_conf);
	kfree(old_plan);
	mod_timer(&device->request_timer, jiffies + HZ);
	goto success;

fail_unlock:
	mutex_unlock(&device->resource->conf_update);
fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(device);
out:
	mutex_unlock(&adm_ctx->resource->adm_mutex);
finish:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

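/* Open one backing block device by path, claiming it with claim_ptr.
 * If do_bd_link is set, also register device->vdisk as a holder of the
 * opened device in sysfs. */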
static struct file *open_backing_dev(struct drbd_device *device,
		const char *bdev_path, void *claim_ptr, bool do_bd_link)
{
	struct file *file;
	int err = 0;

	file = bdev_file_open_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE,
				      claim_ptr, NULL);
	if (IS_ERR(file)) {
		drbd_err(device, "open(\"%s\") failed with %ld\n",
			 bdev_path, PTR_ERR(file));
		return file;
	}

	if (!do_bd_link)
		return file;

	err = bd_link_disk_holder(file_bdev(file), device->vdisk);
	if (err) {
		fput(file);
		drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
			 bdev_path, err);
		file = ERR_PTR(err);
	}
	return file;
}

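/* Open both the data device and the meta data device described by
 * new_disk_conf.  Internal meta data lives on the data device itself;
 * external meta data may be shared among several DRBD minors, which is
 * why the claim pointer and holder linking differ below. */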
static int open_backing_devices(struct drbd_device *device,
		struct disk_conf *new_disk_conf,
		struct drbd_backing_dev *nbc)
{
	struct file *file;

	file = open_backing_dev(device, new_disk_conf->backing_dev, device,
				true);
	if (IS_ERR(file))
		return ERR_OPEN_DISK;
	nbc->backing_bdev = file_bdev(file);
	nbc->backing_bdev_file = file;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	file = open_backing_dev(device, new_disk_conf->meta_dev,
		/* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
		 * if potentially shared with other drbd minors */
			(new_disk_conf->meta_dev_idx < 0) ? (void *)device : (void *)drbd_m_holder,
		/* avoid double bd_claim_by_disk() for the same (source, target) tuple,
		 * as would happen with internal metadata. */
			(new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
			 new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
	if (IS_ERR(file))
		return ERR_OPEN_MD_DISK;
	nbc->md_bdev = file_bdev(file);
	nbc->f_md_bdev = file;
	return NO_ERROR;
}

static void close_backing_dev(struct drbd_device *device,
		struct file *bdev_file, bool do_bd_unlink)
{
	if (!bdev_file)
		return;
	if (do_bd_unlink)
		bd_unlink_disk_holder(file_bdev(bdev_file), device->vdisk);
	fput(bdev_file);
}

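/* Counterpart of attach: close the meta data and backing devices of an
 * ldev, then free the attached disk_conf and the structure itself. */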
void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	close_backing_dev(device, ldev->f_md_bdev,
			  ldev->md_bdev != ldev->backing_bdev);
	close_backing_dev(device, ldev->backing_bdev_file, true);

	kfree(ldev->disk_conf);
	kfree(ldev);
}

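/* Netlink handler: attach a local backing device.  Opens the backing
 * devices, reads the meta data super block, and, past the point of no
 * return, hands everything over to the device and lets the state engine
 * negotiate the resulting disk state. */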
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	struct drbd_device *device;
	struct drbd_peer_device *peer_device;
	struct drbd_connection *connection;
	int err;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct disk_conf *new_disk_conf = NULL;
	struct lru_cache *resync_lru = NULL;
	struct fifo_buffer *new_plan = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	struct net_conf *nc;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto finish;

	device = adm_ctx->device;
	mutex_lock(&adm_ctx->resource->adm_mutex);
	peer_device = first_peer_device(device);
	connection = peer_device->connection;
	conn_reconfig_start(connection);

	/* if you want to reconfigure, please tear down first */
	if (device->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));

	/* make sure there is no leftover from previous force-detach attempts */
	clear_bit(FORCE_DETACH, &device->flags);
	clear_bit(WAS_IO_ERROR, &device->flags);
	clear_bit(WAS_READ_ERROR, &device->flags);

	/* and no leftover from previously aborted resync or verify, either */
	device->rs_total = 0;
	device->rs_failed = 0;
	atomic_set(&device->rs_pending_cnt, 0);

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc_obj(struct drbd_backing_dev);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	spin_lock_init(&nbc->md.uuid_lock);

	new_disk_conf = kzalloc_obj(struct disk_conf);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	nbc->disk_conf = new_disk_conf;

	set_disk_conf_defaults(new_disk_conf);
	err = disk_conf_from_attrs(new_disk_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
		goto fail;
	}

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
	if (!new_plan) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (nc) {
		if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
			rcu_read_unlock();
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}
	rcu_read_unlock();

	retcode = open_backing_devices(device, new_disk_conf, nbc);
	if (retcode != NO_ERROR)
		goto fail;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			       1, 61, sizeof(struct bm_extent),
			       offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* Read our meta data super block early.
	 * This also sets other on-disk offsets. */
	retcode = drbd_md_read(device, nbc);
	if (retcode != NO_ERROR)
		goto fail;

	sanitize_disk_conf(device, new_disk_conf, nbc);

	if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
		drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
			 (unsigned long long) drbd_get_max_capacity(nbc),
			 (unsigned long long) new_disk_conf->disk_size);
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TOO_SMALL;
		drbd_warn(device, "refusing attach: md-device too small, "
			  "at least %llu sectors needed for this meta-disk type\n",
			  (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) < get_capacity(device->vdisk)) {
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		drbd_warn(device, "==> truncating very big lower level device "
			  "to currently maximum possible %llu sectors <==\n",
			  (unsigned long long) max_possible_sectors);
		if (new_disk_conf->meta_dev_idx >= 0)
			drbd_warn(device, "==>> using internal or flexible "
				  "meta data may help <<==\n");
	}

	drbd_suspend_io(device);
	/* also wait for the last barrier ack. */
	/* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
	 * We need a way to either ignore barrier acks for barriers sent before a device
	 * was attached, or a way to wait for all pending barrier acks to come in.
	 * As barriers are counted per resource,
	 * we'd need to suspend io on all devices of a resource.
	 */
	wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
	/* and for any other previously queued work */
	drbd_flush_workqueue(&connection->sender_work);

	rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = (enum drbd_ret_code)rv;
	drbd_resume_io(device);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(device, D_ATTACHING))
		goto force_diskless;

	if (!device->bitmap) {
		if (drbd_bm_init(device)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	if (device->state.pdsk != D_UP_TO_DATE && device->ed_uuid &&
	    (device->state.role == R_PRIMARY || device->state.peer == R_PRIMARY) &&
	    (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
			 (unsigned long long)device->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(device, new_disk_conf)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	{
		unsigned long long nsz = drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0);
		unsigned long long eff = nbc->md.la_size_sect;
		if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && nsz < eff) {
			if (nsz == nbc->disk_conf->disk_size) {
				drbd_warn(device, "truncating a consistent device during attach (%llu < %llu)\n", nsz, eff);
			} else {
				drbd_warn(device, "refusing to truncate a consistent device (%llu < %llu)\n", nsz, eff);
				drbd_msg_sprintf_info(adm_ctx->reply_skb,
					"To-be-attached device has last effective > current size, and is consistent\n"
					"(%llu > %llu sectors). Refusing to attach.", eff, nsz);
				retcode = ERR_IMPLICIT_SHRINK;
				goto force_diskless_dec;
			}
		}
	}

	lock_all_resources();
	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
	if (retcode != NO_ERROR) {
		unlock_all_resources();
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &device->flags);
	else
		set_bit(MD_NO_FUA, &device->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by the error cleanup below.
	 * From now on, the device takes over responsibility, and the state
	 * engine should clean it up somewhere. */
	D_ASSERT(device, device->ldev == NULL);
	device->ldev = nbc;
	device->resync = resync_lru;
	device->rs_plan_s = new_plan;
	nbc = NULL;
	resync_lru = NULL;
	new_disk_conf = NULL;
	new_plan = NULL;

	drbd_resync_after_changed(device);
	drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH);
	unlock_all_resources();

	if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &device->flags);
	else
		clear_bit(CRASHED_PRIMARY, &device->flags);

	if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
	    !(device->state.role == R_PRIMARY && device->resource->susp_nod))
		set_bit(CRASHED_PRIMARY, &device->flags);

	device->send_cnt = 0;
	device->recv_cnt = 0;
	device->read_cnt = 0;
	device->writ_cnt = 0;

	drbd_reconsider_queue_parameters(device, device->ldev, NULL);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &device->flags);
	if (device->state.role != R_PRIMARY &&
	    drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &device->flags);

	dd = drbd_determine_dev_size(device, 0, NULL);
	if (dd <= DS_ERROR) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == DS_GREW)
		set_bit(RESYNC_AFTER_NEG, &device->flags);

	if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
	    (test_bit(CRASHED_PRIMARY, &device->flags) &&
	     drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
		drbd_info(device, "Assuming that all blocks are out of sync "
			  "(aka FullSync)\n");
		if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
				   "set_n_write from attaching", BM_LOCKED_MASK,
				   NULL)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(device, &drbd_bm_read,
				   "read from attaching", BM_LOCKED_MASK,
				   NULL)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
		drbd_suspend_al(device); /* IO is still suspended here... */

	spin_lock_irq(&device->resource->req_lock);
	os = drbd_read_state(device);
	ns = os;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	 * otherwise investigate MDF_WAS_UP_TO_DATE...
	 * If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	 * otherwise into D_CONSISTENT state.
	 */
	if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	rcu_read_lock();
	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	 * MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	 * this point, because drbd_request_state() modifies these
	 * flags. */

	if (rcu_dereference(device->ldev->disk_conf)->al_updates)
		device->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		device->ldev->md.flags |= MDF_AL_DISABLED;

	rcu_read_unlock();

	/* In case we are C_CONNECTED, postpone any decision on the new disk
	 * state until after the negotiation phase. */
	if (device->state.conn == C_CONNECTED) {
		device->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		 * To avoid a race in receive_state, free p_uuid while
		 * holding req_lock, i.e. atomic with the state change */
		kfree(device->p_uuid);
		device->p_uuid = NULL;
	}

	rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
	spin_unlock_irq(&device->resource->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	mod_timer(&device->request_timer, jiffies + HZ);

	if (device->state.role == R_PRIMARY)
		device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
	else
		device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(device);
	drbd_md_sync(device);

	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(device);
	conn_reconfig_done(connection);
	mutex_unlock(&adm_ctx->resource->adm_mutex);
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;

force_diskless_dec:
	put_ldev(device);
force_diskless:
	drbd_force_state(device, NS(disk, D_DISKLESS));
	drbd_md_sync(device);
fail:
	conn_reconfig_done(connection);
	if (nbc) {
		close_backing_dev(device, nbc->f_md_bdev,
				  nbc->md_bdev != nbc->backing_bdev);
		close_backing_dev(device, nbc->backing_bdev_file, true);
		kfree(nbc);
	}
	kfree(new_disk_conf);
	lc_destroy(resync_lru);
	kfree(new_plan);
	mutex_unlock(&adm_ctx->resource->adm_mutex);
finish:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

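/* A forced detach sets FORCE_DETACH and pushes the disk to D_FAILED
 * immediately; a graceful detach waits (interruptibly) for the detach
 * to complete. */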
static int adm_detach(struct drbd_device *device, int force)
{
	if (force) {
		set_bit(FORCE_DETACH, &device->flags);
		drbd_force_state(device, NS(disk, D_FAILED));
		return SS_SUCCESS;
	}

	return drbd_request_detach_interruptible(device);
}

/* Detaching the disk is a process in multiple stages.  First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	enum drbd_ret_code retcode;
	struct detach_parms parms = { };
	int err;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out;

	if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
		err = detach_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}

	mutex_lock(&adm_ctx->resource->adm_mutex);
	retcode = adm_detach(adm_ctx->device, parms.force_detach);
	mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

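/* Return true if any volume of this connection is currently in a
 * (possibly paused) resync. */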
static bool conn_resync_running(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (device->state.conn == C_SYNC_SOURCE ||
		    device->state.conn == C_SYNC_TARGET ||
		    device->state.conn == C_PAUSED_SYNC_S ||
		    device->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

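/* Return true if any volume of this connection is currently running an
 * online verify. */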
static bool conn_ov_running(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (device->state.conn == C_VERIFY_S ||
		    device->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

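/* Validate new net options against the current connection state and the
 * fencing policy of all attached disks.  Called with the rcu_read_lock
 * held (see check_net_options()). */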
static enum drbd_ret_code
_check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
{
	struct drbd_peer_device *peer_device;
	int i;

	if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
		if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
			return ERR_NEED_APV_100;

		if (new_net_conf->two_primaries != old_net_conf->two_primaries)
			return ERR_NEED_APV_100;

		if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
			return ERR_NEED_APV_100;
	}

	if (!new_net_conf->two_primaries &&
	    conn_highest_role(connection) == R_PRIMARY &&
	    conn_highest_peer(connection) == R_PRIMARY)
		return ERR_NEED_ALLOW_TWO_PRI;

	if (new_net_conf->two_primaries &&
	    (new_net_conf->wire_protocol != DRBD_PROT_C))
		return ERR_NOT_PROTO_C;

	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		struct drbd_device *device = peer_device->device;
		if (get_ldev(device)) {
			enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
			put_ldev(device);
			if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
				return ERR_STONITH_AND_PROT_A;
		}
		if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
			return ERR_DISCARD_IMPOSSIBLE;
	}

	if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
		return ERR_CONG_NOT_PROTO_A;

	return NO_ERROR;
}

static enum drbd_ret_code
check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
{
	enum drbd_ret_code rv;
	struct drbd_peer_device *peer_device;
	int i;

	rcu_read_lock();
	rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
	rcu_read_unlock();

	/* connection->peer_devices protected by genl_lock() here */
	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		struct drbd_device *device = peer_device->device;
		if (!device->bitmap) {
			if (drbd_bm_init(device))
				return ERR_NOMEM;
		}
	}

	return rv;
}

struct crypto {
	struct crypto_shash *verify_tfm;
	struct crypto_shash *csums_tfm;
	struct crypto_shash *cram_hmac_tfm;
	struct crypto_shash *integrity_tfm;
};

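/* Allocate a synchronous hash transform for tfm_name; an empty name is
 * not an error.  Returns err_alg if the algorithm is not available. */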
static int
alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_shash(tfm_name, 0, 0);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}

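/* Allocate the csums, verify, integrity and CRAM-HMAC transforms named in
 * new_net_conf.  On error, the caller cleans up via free_crypto(). */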
static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	enum drbd_ret_code rv;

	rv = alloc_shash(&crypto->csums_tfm, new_net_conf->csums_alg,
			 ERR_CSUMS_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_shash(&crypto->verify_tfm, new_net_conf->verify_alg,
			 ERR_VERIFY_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_shash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
			 ERR_INTEGRITY_ALG);
	if (rv != NO_ERROR)
		return rv;
	if (new_net_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			 new_net_conf->cram_hmac_alg);

		rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
				 ERR_AUTH_ALG);
	}

	return rv;
}

static void free_crypto(struct crypto *crypto)
{
	crypto_free_shash(crypto->cram_hmac_tfm);
	crypto_free_shash(crypto->integrity_tfm);
	crypto_free_shash(crypto->csums_tfm);
	crypto_free_shash(crypto->verify_tfm);
}

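/* Netlink handler: change net options of an existing connection.  The
 * csums and verify algorithms may not be changed while a resync or an
 * online verify that uses them is running. */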
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	enum drbd_ret_code retcode;
	struct drbd_connection *connection;
	struct net_conf *old_net_conf, *new_net_conf = NULL;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto crypto = { };

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto finish;

	connection = adm_ctx->connection;
	mutex_lock(&adm_ctx->resource->adm_mutex);

	new_net_conf = kzalloc_obj(struct net_conf);
	if (!new_net_conf) {
		retcode = ERR_NOMEM;
		goto out;
	}

	conn_reconfig_start(connection);

	mutex_lock(&connection->data.mutex);
	mutex_lock(&connection->resource->conf_update);
	old_net_conf = connection->net_conf;

	if (!old_net_conf) {
		drbd_msg_put_info(adm_ctx->reply_skb, "net conf missing, try connect");
		retcode = ERR_INVALID_REQUEST;
		goto fail;
	}

	*new_net_conf = *old_net_conf;
	if (should_set_defaults(info))
		set_net_conf_defaults(new_net_conf);

	err = net_conf_from_attrs_for_change(new_net_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(connection, new_net_conf);
	if (retcode != NO_ERROR)
		goto fail;

	/* re-sync running */
	rsr = conn_resync_running(connection);
	if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	/* online verify running */
	ovr = conn_ov_running(connection);
	if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
		retcode = ERR_VERIFY_RUNNING;
		goto fail;
	}

	retcode = alloc_crypto(&crypto, new_net_conf);
	if (retcode != NO_ERROR)
		goto fail;

	rcu_assign_pointer(connection->net_conf, new_net_conf);

	if (!rsr) {
		crypto_free_shash(connection->csums_tfm);
		connection->csums_tfm = crypto.csums_tfm;
		crypto.csums_tfm = NULL;
	}
	if (!ovr) {
		crypto_free_shash(connection->verify_tfm);
		connection->verify_tfm = crypto.verify_tfm;
		crypto.verify_tfm = NULL;
	}

	crypto_free_shash(connection->integrity_tfm);
	connection->integrity_tfm = crypto.integrity_tfm;
	if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
		/* Do this without trying to take connection->data.mutex again. */
		__drbd_send_protocol(connection, P_PROTOCOL_UPDATE);

	crypto_free_shash(connection->cram_hmac_tfm);
	connection->cram_hmac_tfm = crypto.cram_hmac_tfm;

	mutex_unlock(&connection->resource->conf_update);
	mutex_unlock(&connection->data.mutex);
	kvfree_rcu_mightsleep(old_net_conf);

	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		struct drbd_peer_device *peer_device;
		int vnr;

		idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
			drbd_send_sync_param(peer_device);
	}

	goto done;

fail:
	mutex_unlock(&connection->resource->conf_update);
	mutex_unlock(&connection->data.mutex);
	free_crypto(&crypto);
	kfree(new_net_conf);
done:
	conn_reconfig_done(connection);
out:
	mutex_unlock(&adm_ctx->resource->adm_mutex);
finish:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

static void connection_to_info(struct connection_info *info,
			       struct drbd_connection *connection)
{
	info->conn_connection_state = connection->cstate;
	info->conn_role = conn_highest_peer(connection);
}

static void peer_device_to_info(struct peer_device_info *info,
				struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;

	info->peer_repl_state =
		max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
	info->peer_disk_state = device->state.pdsk;
	info->peer_resync_susp_user = device->state.user_isp;
	info->peer_resync_susp_peer = device->state.peer_isp;
	info->peer_resync_susp_dependency = device->state.aftr_isp;
}

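/* Netlink handler: set up the network configuration of a connection and
 * request the transition to C_UNCONNECTED, which starts the connection
 * attempt. */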
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct connection_info connection_info;
	enum drbd_notification_type flags;
	unsigned int peer_devices = 0;
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	struct drbd_peer_device *peer_device;
	struct net_conf *old_net_conf, *new_net_conf = NULL;
	struct crypto crypto = { };
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	enum drbd_ret_code retcode;
	enum drbd_state_rv rv;
	int i;
	int err;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out;
	if (!(adm_ctx->my_addr && adm_ctx->peer_addr)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "connection endpoint(s) missing");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* No need for _rcu here. All reconfiguration is
	 * strictly serialized on genl_lock(). We are protected against
	 * concurrent reconfiguration/addition/deletion */
	for_each_resource(resource, &drbd_resources) {
		for_each_connection(connection, resource) {
			if (nla_len(adm_ctx->my_addr) == connection->my_addr_len &&
			    !memcmp(nla_data(adm_ctx->my_addr), &connection->my_addr,
				    connection->my_addr_len)) {
				retcode = ERR_LOCAL_ADDR;
				goto out;
			}

			if (nla_len(adm_ctx->peer_addr) == connection->peer_addr_len &&
			    !memcmp(nla_data(adm_ctx->peer_addr), &connection->peer_addr,
				    connection->peer_addr_len)) {
				retcode = ERR_PEER_ADDR;
				goto out;
			}
		}
	}

	mutex_lock(&adm_ctx->resource->adm_mutex);
	connection = first_connection(adm_ctx->resource);
	conn_reconfig_start(connection);

	if (connection->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, drbdsetup / netlink process context */
	new_net_conf = kzalloc_obj(*new_net_conf);
	if (!new_net_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	set_net_conf_defaults(new_net_conf);

	err = net_conf_from_attrs(new_net_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(connection, new_net_conf);
	if (retcode != NO_ERROR)
		goto fail;

	retcode = alloc_crypto(&crypto, new_net_conf);
	if (retcode != NO_ERROR)
		goto fail;

	((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	drbd_flush_workqueue(&connection->sender_work);

	mutex_lock(&adm_ctx->resource->conf_update);
	old_net_conf = connection->net_conf;
	if (old_net_conf) {
		retcode = ERR_NET_CONFIGURED;
		mutex_unlock(&adm_ctx->resource->conf_update);
		goto fail;
	}
	rcu_assign_pointer(connection->net_conf, new_net_conf);

	conn_free_crypto(connection);
	connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
	connection->integrity_tfm = crypto.integrity_tfm;
	connection->csums_tfm = crypto.csums_tfm;
	connection->verify_tfm = crypto.verify_tfm;

	connection->my_addr_len = nla_len(adm_ctx->my_addr);
	memcpy(&connection->my_addr, nla_data(adm_ctx->my_addr), connection->my_addr_len);
	connection->peer_addr_len = nla_len(adm_ctx->peer_addr);
	memcpy(&connection->peer_addr, nla_data(adm_ctx->peer_addr), connection->peer_addr_len);

	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		peer_devices++;
	}

	connection_to_info(&connection_info, connection);
	flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
	mutex_lock(&notification_mutex);
	notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		struct peer_device_info peer_device_info;

		peer_device_to_info(&peer_device_info, peer_device);
		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
		notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
	}
	mutex_unlock(&notification_mutex);
	mutex_unlock(&adm_ctx->resource->conf_update);

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		struct drbd_device *device = peer_device->device;
		device->send_cnt = 0;
		device->recv_cnt = 0;
	}
	rcu_read_unlock();

	rv = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	conn_reconfig_done(connection);
	mutex_unlock(&adm_ctx->resource->adm_mutex);
	adm_ctx->reply_dh->ret_code = rv;
	return 0;

fail:
	free_crypto(&crypto);
	kfree(new_net_conf);

	conn_reconfig_done(connection);
	mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

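/* Take the connection down gracefully, retrying with the peer or our own
 * disk outdated where the state engine requires it; on success, stop the
 * receiver thread and force C_STANDALONE. */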
static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
{
	enum drbd_conns cstate;
	enum drbd_state_rv rv;

repeat:
	rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
				force ? CS_HARD : 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
		break;
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);

		if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
			rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);

		break;
	case SS_CW_FAILED_BY_PEER:
		spin_lock_irq(&connection->resource->req_lock);
		cstate = connection->cstate;
		spin_unlock_irq(&connection->resource->req_lock);
		if (cstate <= C_WF_CONNECTION)
			goto repeat;
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
						CS_HARD);
		}
		break;
	default:
		/* no special handling necessary */
		break;
	}

	if (rv >= SS_SUCCESS) {
		enum drbd_state_rv rv2;
		/* No one else can reconfigure the network while I am here.
		 * The state handling only uses drbd_thread_stop_nowait(),
		 * we want to really wait here until the receiver is no more.
		 */
		drbd_thread_stop(&connection->receiver);

		/* Race breaker.  This additional state change request may be
		 * necessary, if this was a forced disconnect during a receiver
		 * restart.  We may have "killed" the receiver thread just
		 * after drbd_receiver() returned.  Typically, we should be
		 * C_STANDALONE already, now, and this becomes a no-op.
		 */
		rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
					 CS_VERBOSE | CS_HARD);
		if (rv2 < SS_SUCCESS)
			drbd_err(connection,
				 "unexpected rv2=%d in conn_try_disconnect()\n",
				 rv2);
		/* Unlike in DRBD 9, the state engine has generated
		 * NOTIFY_DESTROY events before clearing connection->net_conf. */
	}
	return rv;
}

int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	struct disconnect_parms parms;
	struct drbd_connection *connection;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto fail;

	connection = adm_ctx->connection;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mutex_lock(&adm_ctx->resource->adm_mutex);
	rv = conn_try_disconnect(connection, parms.force_disconnect);
	mutex_unlock(&adm_ctx->resource->adm_mutex);
	if (rv < SS_SUCCESS) {
		adm_ctx->reply_dh->ret_code = rv;
		return 0;
	}
	retcode = NO_ERROR;
fail:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

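/* Decide locally who becomes sync source after an online grow: the
 * primary if the roles differ, otherwise the RESOLVE_CONFLICTS flag
 * breaks the tie. */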
void resync_after_online_grow(struct drbd_device *device)
{
	int iass; /* I am sync source */

	drbd_info(device, "Resync of new storage after online grow\n");
	if (device->state.role != device->state.peer)
		iass = (device->state.role == R_PRIMARY);
	else
		iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);

	if (iass)
		drbd_start_resync(device, C_SYNC_SOURCE);
	else
		_drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}

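/* Netlink handler: resize the device, optionally changing the user-set
 * size limit and the activity log layout (stripes / stripe size). */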
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
	struct resize_parms rs;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	bool change_al_layout = false;
	enum dds_flags ddsf;
	sector_t u_size;
	int err;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto finish;

	mutex_lock(&adm_ctx->resource->adm_mutex);
	device = adm_ctx->device;
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	memset(&rs, 0, sizeof(struct resize_parms));
	rs.al_stripes = device->ldev->md.al_stripes;
	rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
			goto fail_ldev;
		}
	}

	if (device->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail_ldev;
	}

	if (device->state.role == R_SECONDARY &&
	    device->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail_ldev;
	}

	if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	rcu_read_lock();
	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	if (u_size != (sector_t)rs.resize_size) {
		new_disk_conf = kmalloc_obj(struct disk_conf);
		if (!new_disk_conf) {
			retcode = ERR_NOMEM;
			goto fail_ldev;
		}
	}

	if (device->ldev->md.al_stripes != rs.al_stripes ||
	    device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
		u32 al_size_k = rs.al_stripes * rs.al_stripe_size;

		if (al_size_k > (16 * 1024 * 1024)) {
			retcode = ERR_MD_LAYOUT_TOO_BIG;
			goto fail_ldev;
		}

		if (al_size_k < MD_32kB_SECT/2) {
			retcode = ERR_MD_LAYOUT_TOO_SMALL;
			goto fail_ldev;
		}

		if (device->state.conn != C_CONNECTED && !rs.resize_force) {
			retcode = ERR_MD_LAYOUT_CONNECTED;
			goto fail_ldev;
		}

		change_al_layout = true;
	}

	if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
		device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);

	if (new_disk_conf) {
		mutex_lock(&device->resource->conf_update);
		old_disk_conf = device->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;
		new_disk_conf->disk_size = (sector_t)rs.resize_size;
		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
		mutex_unlock(&device->resource->conf_update);
		kvfree_rcu_mightsleep(old_disk_conf);
		new_disk_conf = NULL;
	}

	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
	drbd_md_sync(device);
	put_ldev(device);
	if (dd == DS_ERROR) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	} else if (dd == DS_ERROR_SPACE_MD) {
		retcode = ERR_MD_LAYOUT_NO_FIT;
		goto fail;
	} else if (dd == DS_ERROR_SHRINK) {
		retcode = ERR_IMPLICIT_SHRINK;
		goto fail;
	}

	if (device->state.conn == C_CONNECTED) {
		if (dd == DS_GREW)
			set_bit(RESIZE_PENDING, &device->flags);

		drbd_send_uuids(first_peer_device(device));
		drbd_send_sizes(first_peer_device(device), 1, ddsf);
	}

fail:
	mutex_unlock(&adm_ctx->resource->adm_mutex);
finish:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;

fail_ldev:
	put_ldev(device);
	kfree(new_disk_conf);
	goto fail;
}

int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto fail;

	res_opts = adm_ctx->resource->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
		goto fail;
	}

	mutex_lock(&adm_ctx->resource->adm_mutex);
	err = set_resource_options(adm_ctx->resource, &res_opts);
	if (err) {
		retcode = ERR_INVALID_REQUEST;
		if (err == -ENOMEM)
			retcode = ERR_NOMEM;
	}
	mutex_unlock(&adm_ctx->resource->adm_mutex);

fail:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	struct drbd_device *device;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out;

	device = adm_ctx->device;
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	mutex_lock(&adm_ctx->resource->adm_mutex);

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
	drbd_suspend_io(device);
	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
	drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);

	/* If we happen to be C_STANDALONE R_SECONDARY, just change to
	 * D_INCONSISTENT, and set all bits in the bitmap.  Otherwise,
	 * try to start a resync handshake as sync target for full sync.
	 */
	if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
		retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
		if (retcode >= SS_SUCCESS) {
			if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
					   "set_n_write from invalidate", BM_LOCKED_MASK, NULL))
				retcode = ERR_IO_MD_DISK;
		}
	} else
		retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
	drbd_resume_io(device);
	mutex_unlock(&adm_ctx->resource->adm_mutex);
	put_ldev(device);
out:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

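/* Common helper for admin requests that map 1:1 to a single
 * drbd_request_state() call. */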
static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
					 union drbd_state mask, union drbd_state val)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	enum drbd_ret_code retcode;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx->resource->adm_mutex);
	retcode = drbd_request_state(adm_ctx->device, mask, val);
	mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

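/* Bitmap IO function: set all bits and additionally suspend the activity
 * log; used by invalidate-peer below. */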
static int drbd_bmio_set_susp_al(struct drbd_device *device,
		struct drbd_peer_device *peer_device) __must_hold(local)
{
	int rv;

	rv = drbd_bmio_set_n_write(device, peer_device);
	drbd_suspend_al(device);
	return rv;
}

int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	int retcode; /* drbd_ret_code, drbd_state_rv */
	struct drbd_device *device;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out;

	device = adm_ctx->device;
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	mutex_lock(&adm_ctx->resource->adm_mutex);

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
	drbd_suspend_io(device);
	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
	drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);

	/* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
	 * in the bitmap.  Otherwise, try to start a resync handshake
	 * as sync source for full sync.
	 */
	if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
		/* The peer will get a resync upon connect anyway.
		 * Just make that into a full resync. */
		retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
		if (retcode >= SS_SUCCESS) {
			if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
					   "set_n_write from invalidate_peer",
					   BM_LOCKED_SET_ALLOWED, NULL))
				retcode = ERR_IO_MD_DISK;
		}
	} else
		retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
	drbd_resume_io(device);
	mutex_unlock(&adm_ctx->resource->adm_mutex);
	put_ldev(device);
out:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	enum drbd_ret_code retcode;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx->resource->adm_mutex);
	if (drbd_request_state(adm_ctx->device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
	mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx->resource->adm_mutex);
	if (drbd_request_state(adm_ctx->device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx->device->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}
	mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}

int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	struct drbd_device *device;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx->resource->adm_mutex);
	device = adm_ctx->device;
	if (test_bit(NEW_CUR_UUID, &device->flags)) {
		if (get_ldev_if_state(device, D_ATTACHING)) {
			drbd_uuid_new_current(device);
			put_ldev(device);
		} else {
			/* This is effectively a multi-stage "forced down".
			 * The NEW_CUR_UUID bit is supposedly only set, if we
			 * lost the replication connection, and are configured
			 * to freeze IO and wait for some fence-peer handler.
			 * So we still don't have a replication connection.
			 * And now we don't have a local disk either.  After
			 * resume, we will fail all pending and new IO, because
			 * we don't have any data anymore.  Which means we will
			 * eventually be able to terminate all users of this
			 * device, and then take it down.  By bumping the
			 * "effective" data uuid, we make sure that you really
			 * need to tear down before you reconfigure: we will
			 * then refuse to re-connect or re-attach (because no
			 * matching real data uuid exists).
			 */
			u64 val;
			val = get_random_u64();
			drbd_set_ed_uuid(device, val);
			drbd_warn(device, "Resumed without access to data; please tear down before attempting to re-configure.\n");
		}
		clear_bit(NEW_CUR_UUID, &device->flags);
	}
	drbd_suspend_io(device);
	retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (device->state.conn < C_CONNECTED)
			tl_clear(first_peer_device(device)->connection);
		if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
			tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(device);
	mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}

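/* Nest a DRBD_NLA_CFG_CONTEXT (resource name, volume number, connection
 * addresses) into the netlink message. */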
static int nla_put_drbd_cfg_context(struct sk_buff *skb,
				    struct drbd_resource *resource,
				    struct drbd_connection *connection,
				    struct drbd_device *device)
{
	struct nlattr *nla;
	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (device &&
	    nla_put_u32(skb, T_ctx_volume, device->vnr))
		goto nla_put_failure;
	if (nla_put_string(skb, T_ctx_resource_name, resource->name))
		goto nla_put_failure;
	if (connection) {
		if (connection->my_addr_len &&
		    nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
			goto nla_put_failure;
		if (connection->peer_addr_len &&
		    nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
			goto nla_put_failure;
	}
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}

/*
 * The generic netlink dump callbacks are called outside the genl_lock(), so
 * they cannot use the simple attribute parsing code which uses global
 * attribute tables.
 */
static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;

	nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen),
		       DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		return NULL;
	return nla_find_nested(nla, attr);
}

static void resource_to_info(struct resource_info *, struct drbd_resource *);

int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_genlmsghdr *dh;
	struct drbd_resource *resource;
	struct resource_info resource_info;
	struct resource_statistics resource_statistics;
	int err;

	rcu_read_lock();
	if (cb->args[0]) {
		for_each_resource_rcu(resource, &drbd_resources)
			if (resource == (struct drbd_resource *)cb->args[0])
				goto found_resource;
		err = 0; /* resource was probably deleted */
		goto out;
	}
	resource = list_entry(&drbd_resources,
			      struct drbd_resource, resources);

found_resource:
	list_for_each_entry_continue_rcu(resource, &drbd_resources, resources) {
		goto put_result;
	}
	err = 0;
	goto out;

put_result:
	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			 cb->nlh->nlmsg_seq, &drbd_genl_family,
			 NLM_F_MULTI, DRBD_ADM_GET_RESOURCES);
	err = -ENOMEM;
	if (!dh)
		goto out;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL);
	if (err)
		goto out;
	err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN));
	if (err)
		goto out;
	resource_to_info(&resource_info, resource);
	err = resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN));
	if (err)
		goto out;
	resource_statistics.res_stat_write_ordering = resource->write_ordering;
	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
	if (err)
		goto out;
	cb->args[0] = (long)resource;
	genlmsg_end(skb, dh);
	err = 0;

out:
	rcu_read_unlock();
	if (err)
		return err;
	return skb->len;
}

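/* Take a consistent snapshot of the per-device statistics; the UUID history
 * is copied under md->uuid_lock so it cannot change mid-copy. */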
static void device_to_statistics(struct device_statistics *s,
				 struct drbd_device *device)
{
	memset(s, 0, sizeof(*s));
	s->dev_upper_blocked = !may_inc_ap_bio(device);
	if (get_ldev(device)) {
		struct drbd_md *md = &device->ldev->md;
		u64 *history_uuids = (u64 *)s->history_uuids;
		int n;

		spin_lock_irq(&md->uuid_lock);
		s->dev_current_uuid = md->uuid[UI_CURRENT];
		BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
		for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
			history_uuids[n] = md->uuid[UI_HISTORY_START + n];
		for (; n < HISTORY_UUIDS; n++)
			history_uuids[n] = 0;
		s->history_uuids_len = HISTORY_UUIDS;
		spin_unlock_irq(&md->uuid_lock);

		s->dev_disk_flags = md->flags;
		put_ldev(device);
	}
	s->dev_size = get_capacity(device->vdisk);
	s->dev_read = device->read_cnt;
	s->dev_write = device->writ_cnt;
	s->dev_al_writes = device->al_writ_cnt;
	s->dev_bm_writes = device->bm_writ_cnt;
	s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
	s->dev_lower_pending = atomic_read(&device->local_cnt);
	s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
	s->dev_exposed_data_uuid = device->ed_uuid;
}

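/* Drop the resource reference that a dump callback parked in cb->args[0]
 * once the dump finishes.  holder_nr is currently unused; the distinct
 * values passed by the various *_done() callbacks appear to serve only as
 * markers. */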
static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr)
{
	if (cb->args[0]) {
		struct drbd_resource *resource =
			(struct drbd_resource *)cb->args[0];
		kref_put(&resource->kref, drbd_destroy_resource);
	}

	return 0;
}

int drbd_adm_dump_devices_done(struct netlink_callback *cb)
{
	return put_resource_in_arg0(cb, 7);
}

static void device_to_info(struct device_info *, struct drbd_device *);

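/* Resumable dump of all devices, optionally restricted to one resource.
 * The iterator state lives in the netlink callback: cb->args[0] holds a
 * referenced resource pointer when a resource-name filter was given, and
 * cb->args[1] is the next minor to visit.  Each invocation emits at most
 * one device. */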
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *resource_filter;
	struct drbd_resource *resource;
	struct drbd_device *device;
	int minor, err, retcode;
	struct drbd_genlmsghdr *dh;
	struct device_info device_info;
	struct device_statistics device_statistics;
	struct idr *idr_to_search;

	resource = (struct drbd_resource *)cb->args[0];
	if (!cb->args[0] && !cb->args[1]) {
		resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
		if (resource_filter) {
			retcode = ERR_RES_NOT_KNOWN;
			resource = drbd_find_resource(nla_data(resource_filter));
			if (!resource) {
				rcu_read_lock();
				goto put_result;
			}
			cb->args[0] = (long)resource;
		}
	}

	rcu_read_lock();
	minor = cb->args[1];
	idr_to_search = resource ? &resource->devices : &drbd_devices;
	device = idr_get_next(idr_to_search, &minor);
	if (!device) {
		err = 0;
		goto out;
	}
	idr_for_each_entry_continue(idr_to_search, device, minor) {
		retcode = NO_ERROR;
		goto put_result; /* only one iteration */
	}
	err = 0;
	goto out; /* no more devices */

put_result:
	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			 cb->nlh->nlmsg_seq, &drbd_genl_family,
			 NLM_F_MULTI, DRBD_ADM_GET_DEVICES);
	err = -ENOMEM;
	if (!dh)
		goto out;
	dh->ret_code = retcode;
	dh->minor = -1U;
	if (retcode == NO_ERROR) {
		dh->minor = device->minor;
		err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device);
		if (err)
			goto out;
		if (get_ldev(device)) {
			struct disk_conf *disk_conf =
				rcu_dereference(device->ldev->disk_conf);

			err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN));
			put_ldev(device);
			if (err)
				goto out;
		}
		device_to_info(&device_info, device);
		err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;

		device_to_statistics(&device_statistics, device);
		err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;
		cb->args[1] = minor + 1;
	}
	genlmsg_end(skb, dh);
	err = 0;

out:
	rcu_read_unlock();
	if (err)
		return err;
	return skb->len;
}

int drbd_adm_dump_connections_done(struct netlink_callback *cb)
{
	return put_resource_in_arg0(cb, 6);
}

enum { SINGLE_RESOURCE, ITERATE_RESOURCES };

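/* Resumable dump of connections.  cb->args[0] holds a referenced resource,
 * cb->args[1] records whether we dump a single resource or iterate over all
 * of them, and cb->args[2] remembers the last connection emitted so the next
 * invocation can continue behind it. */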
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *resource_filter;
	struct drbd_resource *resource = NULL, *next_resource;
	struct drbd_connection *connection;
	int err = 0, retcode;
	struct drbd_genlmsghdr *dh;
	struct connection_info connection_info;
	struct connection_statistics connection_statistics;

	rcu_read_lock();
	resource = (struct drbd_resource *)cb->args[0];
	if (!cb->args[0]) {
		resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
		if (resource_filter) {
			retcode = ERR_RES_NOT_KNOWN;
			resource = drbd_find_resource(nla_data(resource_filter));
			if (!resource)
				goto put_result;
			cb->args[0] = (long)resource;
			cb->args[1] = SINGLE_RESOURCE;
		}
	}
	if (!resource) {
		if (list_empty(&drbd_resources))
			goto out;
		resource = list_first_entry(&drbd_resources, struct drbd_resource, resources);
		kref_get(&resource->kref);
		cb->args[0] = (long)resource;
		cb->args[1] = ITERATE_RESOURCES;
	}

next_resource:
	rcu_read_unlock();
	mutex_lock(&resource->conf_update);
	rcu_read_lock();
	if (cb->args[2]) {
		for_each_connection_rcu(connection, resource)
			if (connection == (struct drbd_connection *)cb->args[2])
				goto found_connection;
		/* connection was probably deleted */
		goto no_more_connections;
	}
	connection = list_entry(&resource->connections, struct drbd_connection, connections);

found_connection:
	list_for_each_entry_continue_rcu(connection, &resource->connections, connections) {
		if (!has_net_conf(connection))
			continue;
		retcode = NO_ERROR;
		goto put_result; /* only one iteration */
	}

no_more_connections:
	if (cb->args[1] == ITERATE_RESOURCES) {
		for_each_resource_rcu(next_resource, &drbd_resources) {
			if (next_resource == resource)
				goto found_resource;
		}
		/* resource was probably deleted */
	}
	goto out;

found_resource:
	list_for_each_entry_continue_rcu(next_resource, &drbd_resources, resources) {
		mutex_unlock(&resource->conf_update);
		kref_put(&resource->kref, drbd_destroy_resource);
		resource = next_resource;
		kref_get(&resource->kref);
		cb->args[0] = (long)resource;
		cb->args[2] = 0;
		goto next_resource;
	}
	goto out; /* no more resources */

put_result:
	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			 cb->nlh->nlmsg_seq, &drbd_genl_family,
			 NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS);
	err = -ENOMEM;
	if (!dh)
		goto out;
	dh->ret_code = retcode;
	dh->minor = -1U;
	if (retcode == NO_ERROR) {
		struct net_conf *net_conf;

		err = nla_put_drbd_cfg_context(skb, resource, connection, NULL);
		if (err)
			goto out;
		net_conf = rcu_dereference(connection->net_conf);
		if (net_conf) {
			err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN));
			if (err)
				goto out;
		}
		connection_to_info(&connection_info, connection);
		err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;
		connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
		err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;
		cb->args[2] = (long)connection;
	}
	genlmsg_end(skb, dh);
	err = 0;

out:
	rcu_read_unlock();
	if (resource)
		mutex_unlock(&resource->conf_update);
	if (err)
		return err;
	return skb->len;
}

enum mdf_peer_flag {
	MDF_PEER_CONNECTED	= 1 << 0,
	MDF_PEER_OUTDATED	= 1 << 1,
	MDF_PEER_FENCING	= 1 << 2,
	MDF_PEER_FULL_SYNC	= 1 << 3,
};

static void peer_device_to_statistics(struct peer_device_statistics *s,
				      struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;

	memset(s, 0, sizeof(*s));
	s->peer_dev_received = device->recv_cnt;
	s->peer_dev_sent = device->send_cnt;
	s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
			      atomic_read(&device->rs_pending_cnt);
	s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
	s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
	s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
	if (get_ldev(device)) {
		struct drbd_md *md = &device->ldev->md;

		spin_lock_irq(&md->uuid_lock);
		s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
		spin_unlock_irq(&md->uuid_lock);
		s->peer_dev_flags =
			(drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
				MDF_PEER_CONNECTED : 0) +
			(drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
			 !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
				MDF_PEER_OUTDATED : 0) +
			/* FIXME: MDF_PEER_FENCING? */
			(drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
				MDF_PEER_FULL_SYNC : 0);
		put_ldev(device);
	}
}

int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
{
	return put_resource_in_arg0(cb, 9);
}

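/* Resumable dump of peer devices.  This iterates over two levels:
 * cb->args[1] is the minor of the device being visited, cb->args[2] the
 * last peer device emitted for it; cb->args[0] optionally pins a single
 * resource, as above. */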
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *resource_filter;
	struct drbd_resource *resource;
	struct drbd_device *device;
	struct drbd_peer_device *peer_device = NULL;
	int minor, err, retcode;
	struct drbd_genlmsghdr *dh;
	struct idr *idr_to_search;

	resource = (struct drbd_resource *)cb->args[0];
	if (!cb->args[0] && !cb->args[1]) {
		resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
		if (resource_filter) {
			retcode = ERR_RES_NOT_KNOWN;
			resource = drbd_find_resource(nla_data(resource_filter));
			if (!resource) {
				rcu_read_lock();
				goto put_result;
			}
		}
		cb->args[0] = (long)resource;
	}

	rcu_read_lock();
	minor = cb->args[1];
	idr_to_search = resource ? &resource->devices : &drbd_devices;
	device = idr_find(idr_to_search, minor);
	if (!device) {
next_device:
		minor++;
		cb->args[2] = 0;
		device = idr_get_next(idr_to_search, &minor);
		if (!device) {
			err = 0;
			goto out;
		}
	}
	if (cb->args[2]) {
		for_each_peer_device(peer_device, device)
			if (peer_device == (struct drbd_peer_device *)cb->args[2])
				goto found_peer_device;
		/* peer device was probably deleted */
		goto next_device;
	}
	/* Make peer_device point to the list head (not the first entry). */
	peer_device = list_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);

found_peer_device:
	list_for_each_entry_continue_rcu(peer_device, &device->peer_devices, peer_devices) {
		if (!has_net_conf(peer_device->connection))
			continue;
		retcode = NO_ERROR;
		goto put_result; /* only one iteration */
	}
	goto next_device;

put_result:
	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			 cb->nlh->nlmsg_seq, &drbd_genl_family,
			 NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES);
	err = -ENOMEM;
	if (!dh)
		goto out;
	dh->ret_code = retcode;
	dh->minor = -1U;
	if (retcode == NO_ERROR) {
		struct peer_device_info peer_device_info;
		struct peer_device_statistics peer_device_statistics;

		dh->minor = minor;
		err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device);
		if (err)
			goto out;
		peer_device_to_info(&peer_device_info, peer_device);
		err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;
		peer_device_to_statistics(&peer_device_statistics, peer_device);
		err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;
		cb->args[1] = minor;
		cb->args[2] = (long)peer_device;
	}
	genlmsg_end(skb, dh);
	err = 0;

out:
	rcu_read_unlock();
	if (err)
		return err;
	return skb->len;
}
/*
 * Return the connection of @resource if @resource has exactly one connection.
 */
static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
{
	struct list_head *connections = &resource->connections;

	if (list_empty(connections) || connections->next->next != connections)
		return NULL;
	return list_first_entry(&resource->connections, struct drbd_connection, connections);
}

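/* Fill in the complete status of one device: configuration context, resource,
 * disk and net options, current state, counters, and (optionally) the state
 * change information carried in @sib. */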
static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
			       const struct sib_info *sib)
{
	struct drbd_resource *resource = device->resource;
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we had better exclude sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process.  Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(device);

	/* We still need to add connection name and volume number information.
	 * The minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev) {
		struct disk_conf *disk_conf;

		disk_conf = rcu_dereference(device->ldev->disk_conf);
		err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
	}
	if (!err) {
		struct net_conf *nc;

		nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
		if (nc)
			err = net_conf_to_skb(skb, nc, exclude_sensitive);
	}
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
	    nla_put_u32(skb, T_current_state, device->state.i) ||
	    nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
	    nla_put_u64_0pad(skb, T_capacity, get_capacity(device->vdisk)) ||
	    nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
	    nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
	    nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
	    nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
	    nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
	    nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
	    nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
	    nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
	    nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
		goto nla_put_failure;

	if (got_ldev) {
		int err;

		spin_lock_irq(&device->ldev->md.uuid_lock);
		err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
		spin_unlock_irq(&device->ldev->md.uuid_lock);

		if (err)
			goto nla_put_failure;

		if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
		    nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
		    nla_put_u64_0pad(skb, T_bits_oos,
				     drbd_bm_total_weight(device)))
			goto nla_put_failure;
		if (C_SYNC_SOURCE <= device->state.conn &&
		    C_PAUSED_SYNC_T >= device->state.conn) {
			if (nla_put_u64_0pad(skb, T_bits_rs_total,
					     device->rs_total) ||
			    nla_put_u64_0pad(skb, T_bits_rs_failed,
					     device->rs_failed))
				goto nla_put_failure;
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
			    nla_put_u32(skb, T_new_state, sib->ns.i))
				goto nla_put_failure;
			break;
		case SIB_HELPER_POST:
			if (nla_put_u32(skb, T_helper_exit_code,
					sib->helper_exit_code))
				goto nla_put_failure;
			fallthrough;
		case SIB_HELPER_PRE:
			if (nla_put_string(skb, T_helper, sib->helper_name))
				goto nla_put_failure;
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(device);
	return err;
}

int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	enum drbd_ret_code retcode;
	int err;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx->reply_skb, adm_ctx->device, NULL);
	if (err) {
		nlmsg_free(adm_ctx->reply_skb);
		adm_ctx->reply_skb = NULL;
		return err;
	}
out:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_device *device;
	struct drbd_genlmsghdr *dh;
	struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
	struct drbd_resource *resource = NULL;
	struct drbd_resource *tmp;
	unsigned volume = cb->args[1];

	/* Open-coded, deferred iteration:
	 * for_each_resource_safe(resource, tmp, &drbd_resources) {
	 *	connection = "first connection of resource or undefined";
	 *	idr_for_each_entry(&resource->devices, device, i) {
	 *	  ...
	 *	}
	 * }
	 * where resource is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the device won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/drbd_destroy_connection() */
	rcu_read_lock();
	/* revalidate iterator position */
	for_each_resource_rcu(tmp, &drbd_resources) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			resource = pos;
			break;
		}
		if (tmp == pos) {
			resource = pos;
			break;
		}
	}
	if (resource) {
next_resource:
		device = idr_get_next(&resource->devices, &volume);
		if (!device) {
			/* No more volumes to dump on this resource.
			 * Advance resource iterator. */
			pos = list_entry_rcu(resource->resources.next,
					     struct drbd_resource, resources);
			/* Did we dump any volume of this resource yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->resources == &drbd_resources || cb->args[2])
					goto out;
				volume = 0;
				resource = pos;
				goto next_resource;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, &drbd_genl_family,
				 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!device) {
			/* This is a connection without a single volume.
			 * Surprisingly enough, it may have a network
			 * configuration. */
			struct drbd_connection *connection;

			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			connection = the_only_connection(resource);
			if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
				goto cancel;
			if (connection) {
				struct net_conf *nc;

				nc = rcu_dereference(connection->net_conf);
				if (nc && net_conf_to_skb(skb, nc, 1) != 0)
					goto cancel;
			}
			goto done;
		}

		D_ASSERT(device, device->vnr == volume);
		D_ASSERT(device, device->resource == resource);

		dh->minor = device_to_minor(device);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, device, NULL)) {
cancel:
			genlmsg_cancel(skb, dh);
			goto out;
		}
done:
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == resource) ? volume + 1 : 0;

	/* No more resources/volumes/minors found results in an empty skb,
	 * which will terminate the dump. */
	return skb->len;
}

/*
 * Request status of all resources, or of all volumes within a single resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock().  During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are set up properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *resource_name;
	struct drbd_resource *resource;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
		       nlmsg_attrlen(cb->nlh, hdrlen),
		       DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	nla = nla_find_nested(nla, T_ctx_resource_name);
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	resource_name = nla_data(nla);
	if (!*resource_name)
		return -ENODEV;
	resource = drbd_find_resource(resource_name);
	if (!resource)
		return -ENODEV;

	kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this connection. */
	cb->args[0] = (long)resource;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)resource;

dump:
	return get_one_status(skb, cb);
}

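/* Report which timeout currently applies to the device: UT_PEER_OUTDATED if
 * the peer's disk is known to be outdated, UT_DEGRADED if the degraded
 * wait-for-connection timeout is in use, UT_DEFAULT otherwise. */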
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx->device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx->device->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx->reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx->reply_skb);
		adm_ctx->reply_skb = NULL;
		return err;
	}
out:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	struct start_ov_parms parms;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out;

	device = adm_ctx->device;

	/* resume from last known position, if possible */
	parms.ov_start_sector = device->ov_start_sector;
	parms.ov_stop_sector = ULLONG_MAX;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}
	mutex_lock(&adm_ctx->resource->adm_mutex);

	/* w_make_ov_request expects position to be aligned */
	device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
	device->ov_stop_sector = parms.ov_stop_sector;

	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(device);
	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
	retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
	drbd_resume_io(device);

	mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

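/* Generate a new current UUID.  With the clear_bm option, and if both nodes
 * are connected, speak protocol version 90 or newer, and still carry
 * UUID_JUST_CREATED, the bitmap is cleared and the initial full sync is
 * skipped. */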
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out_nolock;

	device = adm_ctx->device;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(&adm_ctx->resource->adm_mutex);
	mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assumed to be clean */
	if (device->state.conn == C_CONNECTED &&
	    first_peer_device(device)->connection->agreed_pro_version >= 90 &&
	    device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		drbd_info(device, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (device->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK, NULL);
		if (err) {
			drbd_err(device, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(first_peer_device(device));
			_drbd_uuid_set(device, UI_BITMAP, 0);
			drbd_print_uuids(device, "cleared bitmap UUID");
			spin_lock_irq(&device->resource->req_lock);
			_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&device->resource->req_lock);
		}
	}

	drbd_md_sync(device);
out_dec:
	put_ldev(device);
out:
	mutex_unlock(device->state_mutex);
	mutex_unlock(&adm_ctx->resource->adm_mutex);
out_nolock:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

static enum drbd_ret_code
drbd_check_resource_name(struct drbd_config_context *adm_ctx)
{
	const char *name = adm_ctx->resource_name;
	if (!name || !name[0]) {
		drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}

static void resource_to_info(struct resource_info *info,
			     struct drbd_resource *resource)
{
	info->res_role = conn_highest_role(first_connection(resource));
	info->res_susp = resource->susp;
	info->res_susp_nod = resource->susp_nod;
	info->res_susp_fen = resource->susp_fen;
}

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_connection *connection;
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out;

	set_res_opts_defaults(&res_opts);
	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx->reply_skb, from_attrs_err_to_txt(err));
		goto out;
	}

	retcode = drbd_check_resource_name(adm_ctx);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx->resource) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info(adm_ctx->reply_skb, "resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	/* not yet safe for genl_family.parallel_ops */
	mutex_lock(&resources_mutex);
	connection = conn_create(adm_ctx->resource_name, &res_opts);
	mutex_unlock(&resources_mutex);

	if (connection) {
		struct resource_info resource_info;

		mutex_lock(&notification_mutex);
		resource_to_info(&resource_info, connection->resource);
		notify_resource_state(NULL, 0, connection->resource,
				      &resource_info, NOTIFY_CREATE);
		mutex_unlock(&notification_mutex);
	} else
		retcode = ERR_NOMEM;

out:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

static void device_to_info(struct device_info *info,
			   struct drbd_device *device)
{
	info->dev_disk_state = device->state.disk;
}

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	struct drbd_genlmsghdr *dh = genl_info_userhdr(info);
	enum drbd_ret_code retcode;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out;

	if (dh->minor > MINORMASK) {
		drbd_msg_put_info(adm_ctx->reply_skb, "requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx->volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info(adm_ctx->reply_skb, "requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that first_peer_device(device)->connection and device->vnr match the request. */
	if (adm_ctx->device) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_OR_VOLUME_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	mutex_lock(&adm_ctx->resource->adm_mutex);
	retcode = drbd_create_device(adm_ctx, dh->minor);
	if (retcode == NO_ERROR) {
		struct drbd_device *device;
		struct drbd_peer_device *peer_device;
		struct device_info info;
		unsigned int peer_devices = 0;
		enum drbd_notification_type flags;

		device = minor_to_device(dh->minor);
		for_each_peer_device(peer_device, device) {
			if (!has_net_conf(peer_device->connection))
				continue;
			peer_devices++;
		}

		device_to_info(&info, device);
		mutex_lock(&notification_mutex);
		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
		notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
		for_each_peer_device(peer_device, device) {
			struct peer_device_info peer_device_info;

			if (!has_net_conf(peer_device->connection))
				continue;
			peer_device_to_info(&peer_device_info, peer_device);
			flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
			notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
						 NOTIFY_CREATE | flags);
		}
		mutex_unlock(&notification_mutex);
	}
	mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

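/* A minor can be removed even from a live replication group, as long as it
 * is diskless and Secondary; the DESTROY notifications are sent out before
 * the device is actually deleted. */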
static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
{
	struct drbd_peer_device *peer_device;

	if (device->state.disk == D_DISKLESS &&
	    /* no need to be device->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    device->state.role == R_SECONDARY) {
		struct drbd_connection *connection =
			first_connection(device->resource);

		_drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
				    CS_VERBOSE + CS_WAIT_COMPLETE);

		/* If the state engine hasn't stopped the sender thread yet, we
		 * need to flush the sender work queue before generating the
		 * DESTROY events here. */
		if (get_t_state(&connection->worker) == RUNNING)
			drbd_flush_workqueue(&connection->sender_work);

		mutex_lock(&notification_mutex);
		for_each_peer_device(peer_device, device) {
			if (!has_net_conf(peer_device->connection))
				continue;
			notify_peer_device_state(NULL, 0, peer_device, NULL,
						 NOTIFY_DESTROY | NOTIFY_CONTINUES);
		}
		notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
		mutex_unlock(&notification_mutex);

		drbd_delete_device(device);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}

int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	enum drbd_ret_code retcode;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx->resource->adm_mutex);
	retcode = adm_del_minor(adm_ctx->device);
	mutex_unlock(&adm_ctx->resource->adm_mutex);
out:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

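/* A resource can only be removed once all of its connections are StandAlone
 * and it no longer has any devices. */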
static int adm_del_resource(struct drbd_resource *resource)
{
	struct drbd_connection *connection;

	for_each_connection(connection, resource) {
		if (connection->cstate > C_STANDALONE)
			return ERR_NET_CONFIGURED;
	}
	if (!idr_is_empty(&resource->devices))
		return ERR_RES_IN_USE;

	/* The state engine has stopped the sender thread, so we don't
	 * need to flush the sender work queue before generating the
	 * DESTROY event here. */
	mutex_lock(&notification_mutex);
	notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
	mutex_unlock(&notification_mutex);

	mutex_lock(&resources_mutex);
	list_del_rcu(&resource->resources);
	mutex_unlock(&resources_mutex);
	/* Make sure all threads have actually stopped: state handling only
	 * does drbd_thread_stop_nowait(). */
	list_for_each_entry(connection, &resource->connections, connections)
		drbd_thread_stop(&connection->worker);
	synchronize_rcu();
	drbd_free_resource(resource);
	return NO_ERROR;
}

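/* "down": demote to Secondary, disconnect, detach, delete all volumes, and
 * finally delete the resource itself.  Each step must succeed before the
 * next one is attempted. */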
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	struct drbd_device *device;
	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */
	unsigned i;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto finish;

	resource = adm_ctx->resource;
	mutex_lock(&resource->adm_mutex);
	/* demote */
	for_each_connection(connection, resource) {
		struct drbd_peer_device *peer_device;

		idr_for_each_entry(&connection->peer_devices, peer_device, i) {
			retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
			if (retcode < SS_SUCCESS) {
				drbd_msg_put_info(adm_ctx->reply_skb, "failed to demote");
				goto out;
			}
		}

		retcode = conn_try_disconnect(connection, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info(adm_ctx->reply_skb, "failed to disconnect");
			goto out;
		}
	}

	/* detach */
	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_detach(device, 0);
		if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
			drbd_msg_put_info(adm_ctx->reply_skb, "failed to detach");
			goto out;
		}
	}

	/* delete volumes */
	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_del_minor(device);
		if (retcode != NO_ERROR) {
			/* "cannot happen" */
			drbd_msg_put_info(adm_ctx->reply_skb, "failed to delete volume");
			goto out;
		}
	}

	retcode = adm_del_resource(resource);
out:
	mutex_unlock(&resource->adm_mutex);
finish:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context *adm_ctx = info->user_ptr[0];
	struct drbd_resource *resource;
	enum drbd_ret_code retcode;

	if (!adm_ctx->reply_skb)
		return 0;
	retcode = adm_ctx->reply_dh->ret_code;
	if (retcode != NO_ERROR)
		goto finish;
	resource = adm_ctx->resource;

	mutex_lock(&resource->adm_mutex);
	retcode = adm_del_resource(resource);
	mutex_unlock(&resource->adm_mutex);
finish:
	adm_ctx->reply_dh->ret_code = retcode;
	return 0;
}

void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
{
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyway */
		goto nla_put_failure;
	d_out->minor = device_to_minor(device);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, device, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, GFP_NOWAIT);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	drbd_err(device, "Error %d while broadcasting event. "
			 "Event seq:%u sib_reason:%u\n",
			 err, seq, sib->sib_reason);
}

static int nla_put_notification_header(struct sk_buff *msg,
				       enum drbd_notification_type type)
{
	struct drbd_notification_header nh = {
		.nh_type = type,
	};

	return drbd_notification_header_to_skb(msg, &nh, true);
}

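/* The notify_*_state() helpers below all follow the same pattern: when
 * called with skb == NULL they allocate a fresh message and multicast it to
 * the events group; when called with the skb of a pending dump (from
 * drbd_adm_get_initial_state()) they append to that skb instead.  A typical
 * broadcast call therefore looks like:
 *
 *	notify_resource_state(NULL, 0, resource, &resource_info, NOTIFY_CREATE);
 */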
int notify_resource_state(struct sk_buff *skb,
			  unsigned int seq,
			  struct drbd_resource *resource,
			  struct resource_info *resource_info,
			  enum drbd_notification_type type)
{
	struct resource_statistics resource_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     resource_info_to_skb(skb, resource_info, true)))
		goto nla_put_failure;
	resource_statistics.res_stat_write_ordering = resource->write_ordering;
	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
	if (err)
		goto nla_put_failure;
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return 0;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
	return err;
}

int notify_device_state(struct sk_buff *skb,
			unsigned int seq,
			struct drbd_device *device,
			struct device_info *device_info,
			enum drbd_notification_type type)
{
	struct device_statistics device_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = device->minor;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     device_info_to_skb(skb, device_info, true)))
		goto nla_put_failure;
	device_to_statistics(&device_statistics, device);
	device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return 0;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
	return err;
}

int notify_connection_state(struct sk_buff *skb,
			    unsigned int seq,
			    struct drbd_connection *connection,
			    struct connection_info *connection_info,
			    enum drbd_notification_type type)
{
	struct connection_statistics connection_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     connection_info_to_skb(skb, connection_info, true)))
		goto nla_put_failure;
	connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
	connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return 0;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
	return err;
}

int notify_peer_device_state(struct sk_buff *skb,
			     unsigned int seq,
			     struct drbd_peer_device *peer_device,
			     struct peer_device_info *peer_device_info,
			     enum drbd_notification_type type)
{
	struct peer_device_statistics peer_device_statistics;
	struct drbd_resource *resource = peer_device->device->resource;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     peer_device_info_to_skb(skb, peer_device_info, true)))
		goto nla_put_failure;
	peer_device_to_statistics(&peer_device_statistics, peer_device);
	peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return 0;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
	return err;
}

void notify_helper(enum drbd_notification_type type,
		   struct drbd_device *device, struct drbd_connection *connection,
		   const char *name, int status)
{
	struct drbd_resource *resource = device ? device->resource : connection->resource;
	struct drbd_helper_info helper_info;
	unsigned int seq = atomic_inc_return(&notify_genl_seq);
	struct sk_buff *skb = NULL;
	struct drbd_genlmsghdr *dh;
	int err;

	strscpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
	helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
	helper_info.helper_status = status;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	err = -ENOMEM;
	if (!skb)
		goto fail;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
	if (!dh)
		goto fail;
	dh->minor = device ? device->minor : -1;
	dh->ret_code = NO_ERROR;
	mutex_lock(&notification_mutex);
	if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
	    nla_put_notification_header(skb, type) ||
	    drbd_helper_info_to_skb(skb, &helper_info, true))
		goto unlock_fail;
	genlmsg_end(skb, dh);
	err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
	skb = NULL;
	/* skb has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto unlock_fail;
	mutex_unlock(&notification_mutex);
	return;

unlock_fail:
	mutex_unlock(&notification_mutex);
fail:
	nlmsg_free(skb);
	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}

static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
{
	struct drbd_genlmsghdr *dh;
	int err;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_notification_header(skb, NOTIFY_EXISTS))
		goto nla_put_failure;
	genlmsg_end(skb, dh);
	return 0;

nla_put_failure:
	nlmsg_free(skb);
	pr_err("Error %d sending event. Event seq:%u\n", err, seq);
	return err;
}

static void free_state_changes(struct list_head *list)
{
	while (!list_empty(list)) {
		struct drbd_state_change *state_change =
			list_first_entry(list, struct drbd_state_change, list);
		list_del(&state_change->list);
		forget_state_change(state_change);
	}
}

static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
{
	return 1 +
	       state_change->n_connections +
	       state_change->n_devices +
	       state_change->n_devices * state_change->n_connections;
}

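/* Emit one notification per call.  The dump state is kept in the netlink
 * callback: cb->args[0] points at the current drbd_state_change, cb->args[2]
 * holds the netlink sequence number, cb->args[3] the number of notifications
 * the current state change produces, cb->args[4] the index within it, and
 * cb->args[5] counts down the notifications remaining overall. */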
static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
	unsigned int seq = cb->args[2];
	unsigned int n;
	enum drbd_notification_type flags = 0;
	int err = 0;

	/* There is no need for taking notification_mutex here: it doesn't
	   matter if the initial state events mix with later state change
	   events; we can always tell the events apart by the NOTIFY_EXISTS
	   flag. */

	cb->args[5]--;
	if (cb->args[5] == 1) {
		err = notify_initial_state_done(skb, seq);
		goto out;
	}
	n = cb->args[4]++;
	if (cb->args[4] < cb->args[3])
		flags |= NOTIFY_CONTINUES;
	if (n < 1) {
		err = notify_resource_state_change(skb, seq, state_change->resource,
						   NOTIFY_EXISTS | flags);
		goto next;
	}
	n--;
	if (n < state_change->n_connections) {
		err = notify_connection_state_change(skb, seq, &state_change->connections[n],
						     NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_connections;
	if (n < state_change->n_devices) {
		err = notify_device_state_change(skb, seq, &state_change->devices[n],
						 NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_devices;
	if (n < state_change->n_devices * state_change->n_connections) {
		err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
						      NOTIFY_EXISTS | flags);
		goto next;
	}

next:
	if (cb->args[4] == cb->args[3]) {
		struct drbd_state_change *next_state_change =
			list_entry(state_change->list.next,
				   struct drbd_state_change, list);
		cb->args[0] = (long)next_state_change;
		cb->args[3] = notifications_for_state_change(next_state_change);
		cb->args[4] = 0;
	}
out:
	if (err)
		return err;
	else
		return skb->len;
}

int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_resource *resource;
	LIST_HEAD(head);

	if (cb->args[5] >= 1) {
		if (cb->args[5] > 1)
			return get_initial_state(skb, cb);
		if (cb->args[0]) {
			struct drbd_state_change *state_change =
				(struct drbd_state_change *)cb->args[0];

			/* connect list to head */
			list_add(&head, &state_change->list);
			free_state_changes(&head);
		}
		return 0;
	}

	cb->args[5] = 2; /* number of iterations */
	mutex_lock(&resources_mutex);
	for_each_resource(resource, &drbd_resources) {
		struct drbd_state_change *state_change;

		state_change = remember_old_state(resource, GFP_KERNEL);
		if (!state_change) {
			if (!list_empty(&head))
				free_state_changes(&head);
			mutex_unlock(&resources_mutex);
			return -ENOMEM;
		}
		copy_old_to_new_state_change(state_change);
		list_add_tail(&state_change->list, &head);
		cb->args[5] += notifications_for_state_change(state_change);
	}
	mutex_unlock(&resources_mutex);

	if (!list_empty(&head)) {
		struct drbd_state_change *state_change =
			list_entry(head.next, struct drbd_state_change, list);
		cb->args[0] = (long)state_change;
		cb->args[3] = notifications_for_state_change(state_change);
		list_del(&head); /* detach list from head */
	}

	cb->args[2] = cb->nlh->nlmsg_seq;
	return get_initial_state(skb, cb);
}
