--- zzzz-none-000/linux-3.10.107/drivers/block/drbd/drbd_nl.c 2017-06-27 09:49:32.000000000 +0000 +++ scorpion-7490-727/linux-3.10.107/drivers/block/drbd/drbd_nl.c 2021-02-04 17:41:59.000000000 +0000 @@ -23,6 +23,8 @@ */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -32,8 +34,8 @@ #include #include #include "drbd_int.h" +#include "drbd_protocol.h" #include "drbd_req.h" -#include "drbd_wrappers.h" #include #include #include @@ -44,8 +46,8 @@ // int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info); // int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info); -int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info); -int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info); +int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info); +int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info); int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info); int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info); @@ -81,43 +83,17 @@ /* used blkdev_get_by_path, to claim our meta data device(s) */ static char *drbd_m_holder = "Hands off! this is DRBD's meta data device."; -/* Configuration is strictly serialized, because generic netlink message - * processing is strictly serialized by the genl_lock(). - * Which means we can use one static global drbd_config_context struct. - */ -static struct drbd_config_context { - /* assigned from drbd_genlmsghdr */ - unsigned int minor; - /* assigned from request attributes, if present */ - unsigned int volume; -#define VOLUME_UNSPECIFIED (-1U) - /* pointer into the request skb, - * limited lifetime! */ - char *resource_name; - struct nlattr *my_addr; - struct nlattr *peer_addr; - - /* reply buffer */ - struct sk_buff *reply_skb; - /* pointer into reply buffer */ - struct drbd_genlmsghdr *reply_dh; - /* resolved from attributes, if possible */ - struct drbd_conf *mdev; - struct drbd_tconn *tconn; -} adm_ctx; - static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info) { genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb)))); if (genlmsg_reply(skb, info)) - printk(KERN_ERR "drbd: error sending genl reply\n"); + pr_err("error sending genl reply\n"); } /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only * reason it could fail was no space in skb, and there are 4k available. */ -int drbd_msg_put_info(const char *info) +static int drbd_msg_put_info(struct sk_buff *skb, const char *info) { - struct sk_buff *skb = adm_ctx.reply_skb; struct nlattr *nla; int err = -EMSGSIZE; @@ -141,42 +117,46 @@ * and per-family private info->pointers. * But we need to stay compatible with older kernels. * If it returns successfully, adm_ctx members are valid. + * + * At this point, we still rely on the global genl_lock(). + * If we want to avoid that, and allow "genl_family.parallel_ops", we may need + * to add additional synchronization against object destruction/modification. 
*/ #define DRBD_ADM_NEED_MINOR 1 #define DRBD_ADM_NEED_RESOURCE 2 #define DRBD_ADM_NEED_CONNECTION 4 -static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info, - unsigned flags) +static int drbd_adm_prepare(struct drbd_config_context *adm_ctx, + struct sk_buff *skb, struct genl_info *info, unsigned flags) { struct drbd_genlmsghdr *d_in = info->userhdr; const u8 cmd = info->genlhdr->cmd; int err; - memset(&adm_ctx, 0, sizeof(adm_ctx)); + memset(adm_ctx, 0, sizeof(*adm_ctx)); /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */ if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN)) return -EPERM; - adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); - if (!adm_ctx.reply_skb) { + adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!adm_ctx->reply_skb) { err = -ENOMEM; goto fail; } - adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb, + adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb, info, &drbd_genl_family, 0, cmd); /* put of a few bytes into a fresh skb of >= 4k will always succeed. * but anyways */ - if (!adm_ctx.reply_dh) { + if (!adm_ctx->reply_dh) { err = -ENOMEM; goto fail; } - adm_ctx.reply_dh->minor = d_in->minor; - adm_ctx.reply_dh->ret_code = NO_ERROR; + adm_ctx->reply_dh->minor = d_in->minor; + adm_ctx->reply_dh->ret_code = NO_ERROR; - adm_ctx.volume = VOLUME_UNSPECIFIED; + adm_ctx->volume = VOLUME_UNSPECIFIED; if (info->attrs[DRBD_NLA_CFG_CONTEXT]) { struct nlattr *nla; /* parse and validate only */ @@ -186,133 +166,162 @@ /* It was present, and valid, * copy it over to the reply skb. */ - err = nla_put_nohdr(adm_ctx.reply_skb, + err = nla_put_nohdr(adm_ctx->reply_skb, info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len, info->attrs[DRBD_NLA_CFG_CONTEXT]); if (err) goto fail; - /* and assign stuff to the global adm_ctx */ + /* and assign stuff to the adm_ctx */ nla = nested_attr_tb[__nla_type(T_ctx_volume)]; if (nla) - adm_ctx.volume = nla_get_u32(nla); + adm_ctx->volume = nla_get_u32(nla); nla = nested_attr_tb[__nla_type(T_ctx_resource_name)]; if (nla) - adm_ctx.resource_name = nla_data(nla); - adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)]; - adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)]; - if ((adm_ctx.my_addr && - nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) || - (adm_ctx.peer_addr && - nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) { + adm_ctx->resource_name = nla_data(nla); + adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)]; + adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)]; + if ((adm_ctx->my_addr && + nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) || + (adm_ctx->peer_addr && + nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) { err = -EINVAL; goto fail; } } - adm_ctx.minor = d_in->minor; - adm_ctx.mdev = minor_to_mdev(d_in->minor); - adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name); + adm_ctx->minor = d_in->minor; + adm_ctx->device = minor_to_device(d_in->minor); + + /* We are protected by the global genl_lock(). + * But we may explicitly drop it/retake it in drbd_adm_set_role(), + * so make sure this object stays around. 
*/ + if (adm_ctx->device) + kref_get(&adm_ctx->device->kref); + + if (adm_ctx->resource_name) { + adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name); + } - if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) { - drbd_msg_put_info("unknown minor"); + if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) { + drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor"); return ERR_MINOR_INVALID; } - if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) { - drbd_msg_put_info("unknown resource"); + if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) { + drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource"); + if (adm_ctx->resource_name) + return ERR_RES_NOT_KNOWN; return ERR_INVALID_REQUEST; } if (flags & DRBD_ADM_NEED_CONNECTION) { - if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) { - drbd_msg_put_info("no resource name expected"); + if (adm_ctx->resource) { + drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected"); return ERR_INVALID_REQUEST; } - if (adm_ctx.mdev) { - drbd_msg_put_info("no minor number expected"); + if (adm_ctx->device) { + drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected"); return ERR_INVALID_REQUEST; } - if (adm_ctx.my_addr && adm_ctx.peer_addr) - adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr), - nla_len(adm_ctx.my_addr), - nla_data(adm_ctx.peer_addr), - nla_len(adm_ctx.peer_addr)); - if (!adm_ctx.tconn) { - drbd_msg_put_info("unknown connection"); + if (adm_ctx->my_addr && adm_ctx->peer_addr) + adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr), + nla_len(adm_ctx->my_addr), + nla_data(adm_ctx->peer_addr), + nla_len(adm_ctx->peer_addr)); + if (!adm_ctx->connection) { + drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection"); return ERR_INVALID_REQUEST; } } /* some more paranoia, if the request was over-determined */ - if (adm_ctx.mdev && adm_ctx.tconn && - adm_ctx.mdev->tconn != adm_ctx.tconn) { - pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n", - adm_ctx.minor, adm_ctx.resource_name, - adm_ctx.mdev->tconn->name); - drbd_msg_put_info("minor exists in different resource"); + if (adm_ctx->device && adm_ctx->resource && + adm_ctx->device->resource != adm_ctx->resource) { + pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n", + adm_ctx->minor, adm_ctx->resource->name, + adm_ctx->device->resource->name); + drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource"); return ERR_INVALID_REQUEST; } - if (adm_ctx.mdev && - adm_ctx.volume != VOLUME_UNSPECIFIED && - adm_ctx.volume != adm_ctx.mdev->vnr) { + if (adm_ctx->device && + adm_ctx->volume != VOLUME_UNSPECIFIED && + adm_ctx->volume != adm_ctx->device->vnr) { pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n", - adm_ctx.minor, adm_ctx.volume, - adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name); - drbd_msg_put_info("minor exists as different volume"); + adm_ctx->minor, adm_ctx->volume, + adm_ctx->device->vnr, + adm_ctx->device->resource->name); + drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume"); return ERR_INVALID_REQUEST; } + /* still, provide adm_ctx->resource always, if possible. */ + if (!adm_ctx->resource) { + adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource + : adm_ctx->connection ? 
adm_ctx->connection->resource : NULL; + if (adm_ctx->resource) + kref_get(&adm_ctx->resource->kref); + } + return NO_ERROR; fail: - nlmsg_free(adm_ctx.reply_skb); - adm_ctx.reply_skb = NULL; + nlmsg_free(adm_ctx->reply_skb); + adm_ctx->reply_skb = NULL; return err; } -static int drbd_adm_finish(struct genl_info *info, int retcode) +static int drbd_adm_finish(struct drbd_config_context *adm_ctx, + struct genl_info *info, int retcode) { - if (adm_ctx.tconn) { - kref_put(&adm_ctx.tconn->kref, &conn_destroy); - adm_ctx.tconn = NULL; + if (adm_ctx->device) { + kref_put(&adm_ctx->device->kref, drbd_destroy_device); + adm_ctx->device = NULL; + } + if (adm_ctx->connection) { + kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection); + adm_ctx->connection = NULL; + } + if (adm_ctx->resource) { + kref_put(&adm_ctx->resource->kref, drbd_destroy_resource); + adm_ctx->resource = NULL; } - if (!adm_ctx.reply_skb) + if (!adm_ctx->reply_skb) return -ENOMEM; - adm_ctx.reply_dh->ret_code = retcode; - drbd_adm_send_reply(adm_ctx.reply_skb, info); + adm_ctx->reply_dh->ret_code = retcode; + drbd_adm_send_reply(adm_ctx->reply_skb, info); return 0; } -static void setup_khelper_env(struct drbd_tconn *tconn, char **envp) +static void setup_khelper_env(struct drbd_connection *connection, char **envp) { char *afs; /* FIXME: A future version will not allow this case. */ - if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0) + if (connection->my_addr_len == 0 || connection->peer_addr_len == 0) return; - switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) { + switch (((struct sockaddr *)&connection->peer_addr)->sa_family) { case AF_INET6: afs = "ipv6"; snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6", - &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr); + &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr); break; case AF_INET: afs = "ipv4"; snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4", - &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr); + &((struct sockaddr_in *)&connection->peer_addr)->sin_addr); break; default: afs = "ssocks"; snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4", - &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr); + &((struct sockaddr_in *)&connection->peer_addr)->sin_addr); } snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs); } -int drbd_khelper(struct drbd_conf *mdev, char *cmd) +int drbd_khelper(struct drbd_device *device, char *cmd) { char *envp[] = { "HOME=/", "TERM=linux", @@ -322,39 +331,39 @@ NULL }; char mb[12]; char *argv[] = {usermode_helper, cmd, mb, NULL }; - struct drbd_tconn *tconn = mdev->tconn; + struct drbd_connection *connection = first_peer_device(device)->connection; struct sib_info sib; int ret; - if (current == tconn->worker.task) - set_bit(CALLBACK_PENDING, &tconn->flags); + if (current == connection->worker.task) + set_bit(CALLBACK_PENDING, &connection->flags); - snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev)); - setup_khelper_env(tconn, envp); + snprintf(mb, 12, "minor-%d", device_to_minor(device)); + setup_khelper_env(connection, envp); /* The helper may take some time. 
* write out any unsynced meta data changes now */ - drbd_md_sync(mdev); + drbd_md_sync(device); - dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb); + drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb); sib.sib_reason = SIB_HELPER_PRE; sib.helper_name = cmd; - drbd_bcast_event(mdev, &sib); + drbd_bcast_event(device, &sib); ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC); if (ret) - dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n", + drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n", usermode_helper, cmd, mb, (ret >> 8) & 0xff, ret); else - dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n", + drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n", usermode_helper, cmd, mb, (ret >> 8) & 0xff, ret); sib.sib_reason = SIB_HELPER_POST; sib.helper_exit_code = ret; - drbd_bcast_event(mdev, &sib); + drbd_bcast_event(device, &sib); - if (current == tconn->worker.task) - clear_bit(CALLBACK_PENDING, &tconn->flags); + if (current == connection->worker.task) + clear_bit(CALLBACK_PENDING, &connection->flags); if (ret < 0) /* Ignore any ERRNOs we got. */ ret = 0; @@ -362,7 +371,7 @@ return ret; } -int conn_khelper(struct drbd_tconn *tconn, char *cmd) +static int conn_khelper(struct drbd_connection *connection, char *cmd) { char *envp[] = { "HOME=/", "TERM=linux", @@ -370,23 +379,24 @@ (char[20]) { }, /* address family */ (char[60]) { }, /* address */ NULL }; - char *argv[] = {usermode_helper, cmd, tconn->name, NULL }; + char *resource_name = connection->resource->name; + char *argv[] = {usermode_helper, cmd, resource_name, NULL }; int ret; - setup_khelper_env(tconn, envp); - conn_md_sync(tconn); + setup_khelper_env(connection, envp); + conn_md_sync(connection); - conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name); + drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name); /* TODO: conn_bcast_event() ?? */ ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC); if (ret) - conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n", - usermode_helper, cmd, tconn->name, + drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n", + usermode_helper, cmd, resource_name, (ret >> 8) & 0xff, ret); else - conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n", - usermode_helper, cmd, tconn->name, + drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n", + usermode_helper, cmd, resource_name, (ret >> 8) & 0xff, ret); /* TODO: conn_bcast_event() ?? 
*/ @@ -396,49 +406,65 @@ return ret; } -static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn) +static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection) { enum drbd_fencing_p fp = FP_NOT_AVAIL; - struct drbd_conf *mdev; + struct drbd_peer_device *peer_device; int vnr; rcu_read_lock(); - idr_for_each_entry(&tconn->volumes, mdev, vnr) { - if (get_ldev_if_state(mdev, D_CONSISTENT)) { - fp = max_t(enum drbd_fencing_p, fp, - rcu_dereference(mdev->ldev->disk_conf)->fencing); - put_ldev(mdev); + idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { + struct drbd_device *device = peer_device->device; + if (get_ldev_if_state(device, D_CONSISTENT)) { + struct disk_conf *disk_conf = + rcu_dereference(peer_device->device->ldev->disk_conf); + fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing); + put_ldev(device); } } rcu_read_unlock(); + if (fp == FP_NOT_AVAIL) { + /* IO Suspending works on the whole resource. + Do it only for one device. */ + vnr = 0; + peer_device = idr_get_next(&connection->peer_devices, &vnr); + drbd_change_state(peer_device->device, CS_VERBOSE | CS_HARD, NS(susp_fen, 0)); + } + return fp; } -bool conn_try_outdate_peer(struct drbd_tconn *tconn) +bool conn_try_outdate_peer(struct drbd_connection *connection) { + unsigned int connect_cnt; union drbd_state mask = { }; union drbd_state val = { }; enum drbd_fencing_p fp; char *ex_to_string; int r; - if (tconn->cstate >= C_WF_REPORT_PARAMS) { - conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n"); + spin_lock_irq(&connection->resource->req_lock); + if (connection->cstate >= C_WF_REPORT_PARAMS) { + drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n"); + spin_unlock_irq(&connection->resource->req_lock); return false; } - fp = highest_fencing_policy(tconn); + connect_cnt = connection->connect_cnt; + spin_unlock_irq(&connection->resource->req_lock); + + fp = highest_fencing_policy(connection); switch (fp) { case FP_NOT_AVAIL: - conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n"); + drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n"); goto out; case FP_DONT_CARE: return true; default: ; } - r = conn_khelper(tconn, "fence-peer"); + r = conn_khelper(connection, "fence-peer"); switch ((r>>8) & 0xff) { case 3: /* peer is inconsistent */ @@ -452,7 +478,7 @@ val.pdsk = D_OUTDATED; break; case 5: /* peer was down */ - if (conn_highest_disk(tconn) == D_UP_TO_DATE) { + if (conn_highest_disk(connection) == D_UP_TO_DATE) { /* we will(have) create(d) a new UUID anyways... */ ex_to_string = "peer is unreachable, assumed to be dead"; mask.pdsk = D_MASK; @@ -465,71 +491,79 @@ * This is useful when an unconnected R_SECONDARY is asked to * become R_PRIMARY, but finds the other peer being active. */ ex_to_string = "peer is active"; - conn_warn(tconn, "Peer is primary, outdating myself.\n"); + drbd_warn(connection, "Peer is primary, outdating myself.\n"); mask.disk = D_MASK; val.disk = D_OUTDATED; break; case 7: if (fp != FP_STONITH) - conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n"); + drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n"); ex_to_string = "peer was stonithed"; mask.pdsk = D_MASK; val.pdsk = D_OUTDATED; break; default: /* The script is broken ... 
*/ - conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff); + drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff); return false; /* Eventually leave IO frozen */ } - conn_info(tconn, "fence-peer helper returned %d (%s)\n", + drbd_info(connection, "fence-peer helper returned %d (%s)\n", (r>>8) & 0xff, ex_to_string); out: /* Not using - conn_request_state(tconn, mask, val, CS_VERBOSE); + conn_request_state(connection, mask, val, CS_VERBOSE); here, because we might have been able to re-establish the connection in the meantime. */ - spin_lock_irq(&tconn->req_lock); - if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags)) - _conn_request_state(tconn, mask, val, CS_VERBOSE); - spin_unlock_irq(&tconn->req_lock); + spin_lock_irq(&connection->resource->req_lock); + if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) { + if (connection->connect_cnt != connect_cnt) + /* In case the connection was established and dropped + while the fence-peer handler was running, ignore it */ + drbd_info(connection, "Ignoring fence-peer exit code\n"); + else + _conn_request_state(connection, mask, val, CS_VERBOSE); + } + spin_unlock_irq(&connection->resource->req_lock); - return conn_highest_pdsk(tconn) <= D_OUTDATED; + return conn_highest_pdsk(connection) <= D_OUTDATED; } static int _try_outdate_peer_async(void *data) { - struct drbd_tconn *tconn = (struct drbd_tconn *)data; + struct drbd_connection *connection = (struct drbd_connection *)data; - conn_try_outdate_peer(tconn); + conn_try_outdate_peer(connection); - kref_put(&tconn->kref, &conn_destroy); + kref_put(&connection->kref, drbd_destroy_connection); return 0; } -void conn_try_outdate_peer_async(struct drbd_tconn *tconn) +void conn_try_outdate_peer_async(struct drbd_connection *connection) { struct task_struct *opa; - kref_get(&tconn->kref); + kref_get(&connection->kref); /* We may just have force_sig()'ed this thread * to get it out of some blocking network function. * Clear signals; otherwise kthread_run(), which internally uses * wait_on_completion_killable(), will mistake our pending signal * for a new fatal signal and fail. */ flush_signals(current); - opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h"); + opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h"); if (IS_ERR(opa)) { - conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n"); - kref_put(&tconn->kref, &conn_destroy); + drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n"); + kref_put(&connection->kref, drbd_destroy_connection); } } enum drbd_state_rv -drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) +drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force) { + struct drbd_peer_device *const peer_device = first_peer_device(device); + struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL; const int max_tries = 4; enum drbd_state_rv rv = SS_UNKNOWN_ERROR; struct net_conf *nc; @@ -537,16 +571,24 @@ int forced = 0; union drbd_state mask, val; - if (new_role == R_PRIMARY) - request_ping(mdev->tconn); /* Detect a dead peer ASAP */ + if (new_role == R_PRIMARY) { + struct drbd_connection *connection; - mutex_lock(mdev->state_mutex); + /* Detect dead peers as soon as possible. 
*/ + + rcu_read_lock(); + for_each_connection(connection, device->resource) + request_ping(connection); + rcu_read_unlock(); + } + + mutex_lock(device->state_mutex); mask.i = 0; mask.role = R_MASK; val.i = 0; val.role = new_role; while (try++ < max_tries) { - rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); + rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE); /* in case we first succeeded to outdate, * but now suddenly could establish a connection */ @@ -557,8 +599,8 @@ } if (rv == SS_NO_UP_TO_DATE_DISK && force && - (mdev->state.disk < D_UP_TO_DATE && - mdev->state.disk >= D_INCONSISTENT)) { + (device->state.disk < D_UP_TO_DATE && + device->state.disk >= D_INCONSISTENT)) { mask.disk = D_MASK; val.disk = D_UP_TO_DATE; forced = 1; @@ -566,10 +608,10 @@ } if (rv == SS_NO_UP_TO_DATE_DISK && - mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) { - D_ASSERT(mdev->state.pdsk == D_UNKNOWN); + device->state.disk == D_CONSISTENT && mask.pdsk == 0) { + D_ASSERT(device, device->state.pdsk == D_UNKNOWN); - if (conn_try_outdate_peer(mdev->tconn)) { + if (conn_try_outdate_peer(connection)) { val.disk = D_UP_TO_DATE; mask.disk = D_MASK; } @@ -579,8 +621,8 @@ if (rv == SS_NOTHING_TO_DO) goto out; if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) { - if (!conn_try_outdate_peer(mdev->tconn) && force) { - dev_warn(DEV, "Forced into split brain situation!\n"); + if (!conn_try_outdate_peer(connection) && force) { + drbd_warn(device, "Forced into split brain situation!\n"); mask.pdsk = D_MASK; val.pdsk = D_OUTDATED; @@ -592,7 +634,7 @@ retry at most once more in this case. */ int timeo; rcu_read_lock(); - nc = rcu_dereference(mdev->tconn->net_conf); + nc = rcu_dereference(connection->net_conf); timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1; rcu_read_unlock(); schedule_timeout_interruptible(timeo); @@ -601,7 +643,7 @@ continue; } if (rv < SS_SUCCESS) { - rv = _drbd_request_state(mdev, mask, val, + rv = _drbd_request_state(device, mask, val, CS_VERBOSE + CS_WAIT_COMPLETE); if (rv < SS_SUCCESS) goto out; @@ -613,53 +655,51 @@ goto out; if (forced) - dev_warn(DEV, "Forced to consider local data as UpToDate!\n"); + drbd_warn(device, "Forced to consider local data as UpToDate!\n"); /* Wait until nothing is on the fly :) */ - wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0); + wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0); /* FIXME also wait for all pending P_BARRIER_ACK? 
*/ if (new_role == R_SECONDARY) { - set_disk_ro(mdev->vdisk, true); - if (get_ldev(mdev)) { - mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; - put_ldev(mdev); + if (get_ldev(device)) { + device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; + put_ldev(device); } } else { - mutex_lock(&mdev->tconn->conf_update); - nc = mdev->tconn->net_conf; + mutex_lock(&device->resource->conf_update); + nc = connection->net_conf; if (nc) nc->discard_my_data = 0; /* without copy; single bit op is atomic */ - mutex_unlock(&mdev->tconn->conf_update); + mutex_unlock(&device->resource->conf_update); - set_disk_ro(mdev->vdisk, false); - if (get_ldev(mdev)) { - if (((mdev->state.conn < C_CONNECTED || - mdev->state.pdsk <= D_FAILED) - && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced) - drbd_uuid_new_current(mdev); + if (get_ldev(device)) { + if (((device->state.conn < C_CONNECTED || + device->state.pdsk <= D_FAILED) + && device->ldev->md.uuid[UI_BITMAP] == 0) || forced) + drbd_uuid_new_current(device); - mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1; - put_ldev(mdev); + device->ldev->md.uuid[UI_CURRENT] |= (u64)1; + put_ldev(device); } } /* writeout of activity log covered areas of the bitmap * to stable storage done in after state change already */ - if (mdev->state.conn >= C_WF_REPORT_PARAMS) { + if (device->state.conn >= C_WF_REPORT_PARAMS) { /* if this was forced, we should consider sync */ if (forced) - drbd_send_uuids(mdev); - drbd_send_current_state(mdev); + drbd_send_uuids(peer_device); + drbd_send_current_state(peer_device); } - drbd_md_sync(mdev); - - kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); + drbd_md_sync(device); + set_disk_ro(device->vdisk, new_role == R_SECONDARY); + kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE); out: - mutex_unlock(mdev->state_mutex); + mutex_unlock(device->state_mutex); return rv; } @@ -673,11 +713,12 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct set_role_parms parms; int err; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -688,17 +729,22 @@ err = set_role_parms_from_attrs(&parms, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto out; } } + genl_unlock(); + mutex_lock(&adm_ctx.resource->adm_mutex); if (info->genlhdr->cmd == DRBD_ADM_PRIMARY) - retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate); + retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate); else - retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0); + retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0); + + mutex_unlock(&adm_ctx.resource->adm_mutex); + genl_lock(); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -723,7 +769,7 @@ * Activity log size used to be fixed 32kB, * but is about to become configurable. */ -static void drbd_md_set_sector_offsets(struct drbd_conf *mdev, +static void drbd_md_set_sector_offsets(struct drbd_device *device, struct drbd_backing_dev *bdev) { sector_t md_size_sect = 0; @@ -799,38 +845,43 @@ * drbd_adm_suspend_io/drbd_adm_resume_io, * which are (sub) state changes triggered by admin (drbdsetup), * and can be long lived. 
- * This changes an mdev->flag, is triggered by drbd internals, + * This changes an device->flag, is triggered by drbd internals, * and should be short-lived. */ -void drbd_suspend_io(struct drbd_conf *mdev) +void drbd_suspend_io(struct drbd_device *device) { - set_bit(SUSPEND_IO, &mdev->flags); - if (drbd_suspended(mdev)) + set_bit(SUSPEND_IO, &device->flags); + if (drbd_suspended(device)) return; - wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); + wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt)); } -void drbd_resume_io(struct drbd_conf *mdev) +void drbd_resume_io(struct drbd_device *device) { - clear_bit(SUSPEND_IO, &mdev->flags); - wake_up(&mdev->misc_wait); + clear_bit(SUSPEND_IO, &device->flags); + wake_up(&device->misc_wait); } /** * drbd_determine_dev_size() - Sets the right device size obeying all constraints - * @mdev: DRBD device. + * @device: DRBD device. * * Returns 0 on success, negative return values indicate errors. * You should call drbd_md_sync() after calling this function. */ -enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local) +enum determine_dev_size +drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local) { sector_t prev_first_sect, prev_size; /* previous meta location */ sector_t la_size_sect, u_size; + struct drbd_md *md = &device->ldev->md; + u32 prev_al_stripe_size_4k; + u32 prev_al_stripes; sector_t size; char ppb[10]; + void *buffer; int md_moved, la_size_changed; - enum determine_dev_size rv = unchanged; + enum determine_dev_size rv = DS_UNCHANGED; /* race: * application request passes inc_ap_bio, @@ -841,96 +892,149 @@ * Suspend IO right here. * still lock the act_log to not trigger ASSERTs there. */ - drbd_suspend_io(mdev); + drbd_suspend_io(device); + buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */ + if (!buffer) { + drbd_resume_io(device); + return DS_ERROR; + } /* no wait necessary anymore, actually we could assert that */ - wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); + wait_event(device->al_wait, lc_try_lock(device->act_log)); + + prev_first_sect = drbd_md_first_sector(device->ldev); + prev_size = device->ldev->md.md_size_sect; + la_size_sect = device->ldev->md.la_size_sect; - prev_first_sect = drbd_md_first_sector(mdev->ldev); - prev_size = mdev->ldev->md.md_size_sect; - la_size_sect = mdev->ldev->md.la_size_sect; + if (rs) { + /* rs is non NULL if we should change the AL layout only */ + + prev_al_stripes = md->al_stripes; + prev_al_stripe_size_4k = md->al_stripe_size_4k; + + md->al_stripes = rs->al_stripes; + md->al_stripe_size_4k = rs->al_stripe_size / 4; + md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4; + } - /* TODO: should only be some assert here, not (re)init... */ - drbd_md_set_sector_offsets(mdev, mdev->ldev); + drbd_md_set_sector_offsets(device, device->ldev); rcu_read_lock(); - u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size; + u_size = rcu_dereference(device->ldev->disk_conf)->disk_size; rcu_read_unlock(); - size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED); + size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED); - if (drbd_get_capacity(mdev->this_bdev) != size || - drbd_bm_capacity(mdev) != size) { + if (size < la_size_sect) { + if (rs && u_size == 0) { + /* Remove "rs &&" later. 
This check should always be active, but + right now the receiver expects the permissive behavior */ + drbd_warn(device, "Implicit shrink not allowed. " + "Use --size=%llus for explicit shrink.\n", + (unsigned long long)size); + rv = DS_ERROR_SHRINK; + } + if (u_size > size) + rv = DS_ERROR_SPACE_MD; + if (rv != DS_UNCHANGED) + goto err_out; + } + + if (drbd_get_capacity(device->this_bdev) != size || + drbd_bm_capacity(device) != size) { int err; - err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC)); + err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC)); if (unlikely(err)) { /* currently there is only one error: ENOMEM! */ - size = drbd_bm_capacity(mdev)>>1; + size = drbd_bm_capacity(device)>>1; if (size == 0) { - dev_err(DEV, "OUT OF MEMORY! " + drbd_err(device, "OUT OF MEMORY! " "Could not allocate bitmap!\n"); } else { - dev_err(DEV, "BM resizing failed. " + drbd_err(device, "BM resizing failed. " "Leaving size unchanged at size = %lu KB\n", (unsigned long)size); } - rv = dev_size_error; + rv = DS_ERROR; } /* racy, see comments above. */ - drbd_set_my_capacity(mdev, size); - mdev->ldev->md.la_size_sect = size; - dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1), + drbd_set_my_capacity(device, size); + device->ldev->md.la_size_sect = size; + drbd_info(device, "size = %s (%llu KB)\n", ppsize(ppb, size>>1), (unsigned long long)size>>1); } - if (rv == dev_size_error) - goto out; + if (rv <= DS_ERROR) + goto err_out; - la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect); + la_size_changed = (la_size_sect != device->ldev->md.la_size_sect); - md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev) - || prev_size != mdev->ldev->md.md_size_sect; + md_moved = prev_first_sect != drbd_md_first_sector(device->ldev) + || prev_size != device->ldev->md.md_size_sect; - if (la_size_changed || md_moved) { - int err; + if (la_size_changed || md_moved || rs) { + u32 prev_flags; + + /* We do some synchronous IO below, which may take some time. + * Clear the timer, to avoid scary "timer expired!" messages, + * "Superblock" is written out at least twice below, anyways. */ + del_timer(&device->md_sync_timer); + drbd_al_shrink(device); /* All extents inactive. */ + + prev_flags = md->flags; + md->flags &= ~MDF_PRIMARY_IND; + drbd_md_write(device, buffer); - drbd_al_shrink(mdev); /* All extents inactive. */ - dev_info(DEV, "Writing the whole bitmap, %s\n", + drbd_info(device, "Writing the whole bitmap, %s\n", la_size_changed && md_moved ? "size changed and md moved" : la_size_changed ? "size changed" : "md moved"); /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */ - err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write, - "size changed", BM_LOCKED_MASK); - if (err) { - rv = dev_size_error; - goto out; - } - drbd_md_mark_dirty(mdev); + drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write, + "size changed", BM_LOCKED_MASK); + drbd_initialize_al(device, buffer); + + md->flags = prev_flags; + drbd_md_write(device, buffer); + + if (rs) + drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n", + md->al_stripes, md->al_stripe_size_4k * 4); } if (size > la_size_sect) - rv = grew; + rv = la_size_sect ? 
DS_GREW : DS_GREW_FROM_ZERO; if (size < la_size_sect) - rv = shrunk; -out: - lc_unlock(mdev->act_log); - wake_up(&mdev->al_wait); - drbd_resume_io(mdev); + rv = DS_SHRUNK; + + if (0) { + err_out: + if (rs) { + md->al_stripes = prev_al_stripes; + md->al_stripe_size_4k = prev_al_stripe_size_4k; + md->al_size_4k = (u64)prev_al_stripes * prev_al_stripe_size_4k; + + drbd_md_set_sector_offsets(device, device->ldev); + } + } + lc_unlock(device->act_log); + wake_up(&device->al_wait); + drbd_md_put_buffer(device); + drbd_resume_io(device); return rv; } sector_t -drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, +drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev, sector_t u_size, int assume_peer_has_space) { - sector_t p_size = mdev->p_size; /* partner's disk size. */ + sector_t p_size = device->p_size; /* partner's disk size. */ sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */ sector_t m_size; /* my size */ sector_t size = 0; m_size = drbd_get_max_capacity(bdev); - if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) { - dev_warn(DEV, "Resize while not connected was forced by the user!\n"); + if (device->state.conn < C_CONNECTED && assume_peer_has_space) { + drbd_warn(device, "Resize while not connected was forced by the user!\n"); p_size = m_size; } @@ -952,11 +1056,11 @@ } if (size == 0) - dev_err(DEV, "Both nodes diskless!\n"); + drbd_err(device, "Both nodes diskless!\n"); if (u_size) { if (u_size > size) - dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n", + drbd_err(device, "Requested disk size is too big (%lu > %lu)\n", (unsigned long)u_size>>1, (unsigned long)size>>1); else size = u_size; @@ -967,71 +1071,75 @@ /** * drbd_check_al_size() - Ensures that the AL is of the right size - * @mdev: DRBD device. + * @device: DRBD device. * * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation * failed, and 0 on success. You should call drbd_md_sync() after you called * this function. 
*/ -static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc) +static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc) { struct lru_cache *n, *t; struct lc_element *e; unsigned int in_use; int i; - if (mdev->act_log && - mdev->act_log->nr_elements == dc->al_extents) + if (device->act_log && + device->act_log->nr_elements == dc->al_extents) return 0; in_use = 0; - t = mdev->act_log; + t = device->act_log; n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION, dc->al_extents, sizeof(struct lc_element), 0); if (n == NULL) { - dev_err(DEV, "Cannot allocate act_log lru!\n"); + drbd_err(device, "Cannot allocate act_log lru!\n"); return -ENOMEM; } - spin_lock_irq(&mdev->al_lock); + spin_lock_irq(&device->al_lock); if (t) { for (i = 0; i < t->nr_elements; i++) { e = lc_element_by_index(t, i); if (e->refcnt) - dev_err(DEV, "refcnt(%d)==%d\n", + drbd_err(device, "refcnt(%d)==%d\n", e->lc_number, e->refcnt); in_use += e->refcnt; } } if (!in_use) - mdev->act_log = n; - spin_unlock_irq(&mdev->al_lock); + device->act_log = n; + spin_unlock_irq(&device->al_lock); if (in_use) { - dev_err(DEV, "Activity log still in use!\n"); + drbd_err(device, "Activity log still in use!\n"); lc_destroy(n); return -EBUSY; } else { if (t) lc_destroy(t); } - drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elemens */ + drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elemens */ return 0; } -static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) +static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev, + unsigned int max_bio_size) { - struct request_queue * const q = mdev->rq_queue; + struct request_queue * const q = device->rq_queue; unsigned int max_hw_sectors = max_bio_size >> 9; unsigned int max_segments = 0; + struct request_queue *b = NULL; - if (get_ldev_if_state(mdev, D_ATTACHING)) { - struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; + if (bdev) { + b = bdev->backing_bdev->bd_disk->queue; max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); rcu_read_lock(); - max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs; + max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs; rcu_read_unlock(); - put_ldev(mdev); + + blk_set_stacking_limits(&q->limits); + blk_queue_max_write_same_sectors(q, 0); } blk_queue_logical_block_size(q, 512); @@ -1040,104 +1148,125 @@ blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); - if (get_ldev_if_state(mdev, D_ATTACHING)) { - struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; + if (b) { + struct drbd_connection *connection = first_peer_device(device)->connection; + + if (blk_queue_discard(b) && + (connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) { + /* For now, don't allow more than one activity log extent worth of data + * to be discarded in one go. We may need to rework drbd_al_begin_io() + * to allow for even larger discard ranges */ + blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS); + + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + /* REALLY? Is stacking secdiscard "legal"? 
*/ + if (blk_queue_secdiscard(b)) + queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q); + } else { + blk_queue_max_discard_sectors(q, 0); + queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); + queue_flag_clear_unlocked(QUEUE_FLAG_SECDISCARD, q); + } blk_queue_stack_limits(q, b); if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { - dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", + drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", q->backing_dev_info.ra_pages, b->backing_dev_info.ra_pages); q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; } - put_ldev(mdev); } } -void drbd_reconsider_max_bio_size(struct drbd_conf *mdev) +void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev) { unsigned int now, new, local, peer; - now = queue_max_hw_sectors(mdev->rq_queue) << 9; - local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */ - peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */ - - if (get_ldev_if_state(mdev, D_ATTACHING)) { - local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9; - mdev->local_max_bio_size = local; - put_ldev(mdev); + now = queue_max_hw_sectors(device->rq_queue) << 9; + local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */ + peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */ + + if (bdev) { + local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9; + device->local_max_bio_size = local; } local = min(local, DRBD_MAX_BIO_SIZE); /* We may ignore peer limits if the peer is modern enough. Because new from 8.3.8 onwards the peer can use multiple BIOs for a single peer_request */ - if (mdev->state.conn >= C_CONNECTED) { - if (mdev->tconn->agreed_pro_version < 94) - peer = min( mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); + if (device->state.conn >= C_WF_REPORT_PARAMS) { + if (first_peer_device(device)->connection->agreed_pro_version < 94) + peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */ - else if (mdev->tconn->agreed_pro_version == 94) + else if (first_peer_device(device)->connection->agreed_pro_version == 94) peer = DRBD_MAX_SIZE_H80_PACKET; - else if (mdev->tconn->agreed_pro_version < 100) + else if (first_peer_device(device)->connection->agreed_pro_version < 100) peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */ else peer = DRBD_MAX_BIO_SIZE; - } + /* We may later detach and re-attach on a disconnected Primary. + * Avoid this setting to jump back in that case. + * We want to store what we know the peer DRBD can handle, + * not what the peer IO backend can handle. 
*/ + if (peer > device->peer_max_bio_size) + device->peer_max_bio_size = peer; + } new = min(local, peer); - if (mdev->state.role == R_PRIMARY && new < now) - dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now); + if (device->state.role == R_PRIMARY && new < now) + drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now); if (new != now) - dev_info(DEV, "max BIO size = %u\n", new); + drbd_info(device, "max BIO size = %u\n", new); - drbd_setup_queue_param(mdev, new); + drbd_setup_queue_param(device, bdev, new); } /* Starts the worker thread */ -static void conn_reconfig_start(struct drbd_tconn *tconn) +static void conn_reconfig_start(struct drbd_connection *connection) { - drbd_thread_start(&tconn->worker); - conn_flush_workqueue(tconn); + drbd_thread_start(&connection->worker); + drbd_flush_workqueue(&connection->sender_work); } /* if still unconfigured, stops worker again. */ -static void conn_reconfig_done(struct drbd_tconn *tconn) +static void conn_reconfig_done(struct drbd_connection *connection) { bool stop_threads; - spin_lock_irq(&tconn->req_lock); - stop_threads = conn_all_vols_unconf(tconn) && - tconn->cstate == C_STANDALONE; - spin_unlock_irq(&tconn->req_lock); + spin_lock_irq(&connection->resource->req_lock); + stop_threads = conn_all_vols_unconf(connection) && + connection->cstate == C_STANDALONE; + spin_unlock_irq(&connection->resource->req_lock); if (stop_threads) { /* asender is implicitly stopped by receiver * in conn_disconnect() */ - drbd_thread_stop(&tconn->receiver); - drbd_thread_stop(&tconn->worker); + drbd_thread_stop(&connection->receiver); + drbd_thread_stop(&connection->worker); } } /* Make sure IO is suspended before calling this function(). */ -static void drbd_suspend_al(struct drbd_conf *mdev) +static void drbd_suspend_al(struct drbd_device *device) { int s = 0; - if (!lc_try_lock(mdev->act_log)) { - dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n"); + if (!lc_try_lock(device->act_log)) { + drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n"); return; } - drbd_al_shrink(mdev); - spin_lock_irq(&mdev->tconn->req_lock); - if (mdev->state.conn < C_CONNECTED) - s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags); - spin_unlock_irq(&mdev->tconn->req_lock); - lc_unlock(mdev->act_log); + drbd_al_shrink(device); + spin_lock_irq(&device->resource->req_lock); + if (device->state.conn < C_CONNECTED) + s = !test_and_set_bit(AL_SUSPENDED, &device->flags); + spin_unlock_irq(&device->resource->req_lock); + lc_unlock(device->act_log); if (s) - dev_info(DEV, "Suspended AL updates\n"); + drbd_info(device, "Suspended AL updates\n"); } @@ -1175,25 +1304,34 @@ return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION; } +static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b) +{ + return a->disk_barrier != b->disk_barrier || + a->disk_flushes != b->disk_flushes || + a->disk_drain != b->disk_drain; +} + int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; - struct drbd_conf *mdev; + struct drbd_device *device; struct disk_conf *new_disk_conf, *old_disk_conf; struct fifo_buffer *old_plan = NULL, *new_plan = NULL; int err, fifo_size; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) - goto out; + goto finish; - mdev = adm_ctx.mdev; + device = adm_ctx.device; + mutex_lock(&adm_ctx.resource->adm_mutex); /* 
we also need a disk * to change the options on */ - if (!get_ldev(mdev)) { + if (!get_ldev(device)) { retcode = ERR_NO_DISK; goto out; } @@ -1204,8 +1342,8 @@ goto fail; } - mutex_lock(&mdev->tconn->conf_update); - old_disk_conf = mdev->ldev->disk_conf; + mutex_lock(&device->resource->conf_update); + old_disk_conf = device->ldev->disk_conf; *new_disk_conf = *old_disk_conf; if (should_set_defaults(info)) set_disk_conf_defaults(new_disk_conf); @@ -1213,7 +1351,8 @@ err = disk_conf_from_attrs_for_change(new_disk_conf, info); if (err && err != -ENOMSG) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); + goto fail_unlock; } if (!expect(new_disk_conf->resync_rate >= 1)) @@ -1221,29 +1360,29 @@ if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN) new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN; - if (new_disk_conf->al_extents > drbd_al_extents_max(mdev->ldev)) - new_disk_conf->al_extents = drbd_al_extents_max(mdev->ldev); + if (new_disk_conf->al_extents > drbd_al_extents_max(device->ldev)) + new_disk_conf->al_extents = drbd_al_extents_max(device->ldev); if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX) new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX; fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ; - if (fifo_size != mdev->rs_plan_s->size) { + if (fifo_size != device->rs_plan_s->size) { new_plan = fifo_alloc(fifo_size); if (!new_plan) { - dev_err(DEV, "kmalloc of fifo_buffer failed"); + drbd_err(device, "kmalloc of fifo_buffer failed"); retcode = ERR_NOMEM; goto fail_unlock; } } - drbd_suspend_io(mdev); - wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); - drbd_al_shrink(mdev); - err = drbd_check_al_size(mdev, new_disk_conf); - lc_unlock(mdev->act_log); - wake_up(&mdev->al_wait); - drbd_resume_io(mdev); + drbd_suspend_io(device); + wait_event(device->al_wait, lc_try_lock(device->act_log)); + drbd_al_shrink(device); + err = drbd_check_al_size(device, new_disk_conf); + lc_unlock(device->act_log); + wake_up(&device->al_wait); + drbd_resume_io(device); if (err) { retcode = ERR_NOMEM; @@ -1251,10 +1390,10 @@ } write_lock_irq(&global_state_lock); - retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after); + retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after); if (retcode == NO_ERROR) { - rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf); - drbd_resync_after_changed(mdev); + rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); + drbd_resync_after_changed(device); } write_unlock_irq(&global_state_lock); @@ -1262,50 +1401,60 @@ goto fail_unlock; if (new_plan) { - old_plan = mdev->rs_plan_s; - rcu_assign_pointer(mdev->rs_plan_s, new_plan); + old_plan = device->rs_plan_s; + rcu_assign_pointer(device->rs_plan_s, new_plan); } - mutex_unlock(&mdev->tconn->conf_update); + mutex_unlock(&device->resource->conf_update); if (new_disk_conf->al_updates) - mdev->ldev->md.flags &= ~MDF_AL_DISABLED; + device->ldev->md.flags &= ~MDF_AL_DISABLED; else - mdev->ldev->md.flags |= MDF_AL_DISABLED; + device->ldev->md.flags |= MDF_AL_DISABLED; if (new_disk_conf->md_flushes) - clear_bit(MD_NO_FUA, &mdev->flags); + clear_bit(MD_NO_FUA, &device->flags); else - set_bit(MD_NO_FUA, &mdev->flags); + set_bit(MD_NO_FUA, &device->flags); - drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush); + if (write_ordering_changed(old_disk_conf, new_disk_conf)) + drbd_bump_write_ordering(device->resource, NULL, WO_bdev_flush); - drbd_md_sync(mdev); + drbd_md_sync(device); - 
if (mdev->state.conn >= C_CONNECTED) - drbd_send_sync_param(mdev); + if (device->state.conn >= C_CONNECTED) { + struct drbd_peer_device *peer_device; + + for_each_peer_device(peer_device, device) + drbd_send_sync_param(peer_device); + } synchronize_rcu(); kfree(old_disk_conf); kfree(old_plan); - mod_timer(&mdev->request_timer, jiffies + HZ); + mod_timer(&device->request_timer, jiffies + HZ); goto success; fail_unlock: - mutex_unlock(&mdev->tconn->conf_update); + mutex_unlock(&device->resource->conf_update); fail: kfree(new_disk_conf); kfree(new_plan); success: - put_ldev(mdev); + put_ldev(device); out: - drbd_adm_finish(info, retcode); + mutex_unlock(&adm_ctx.resource->adm_mutex); + finish: + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) { - struct drbd_conf *mdev; + struct drbd_config_context adm_ctx; + struct drbd_device *device; + struct drbd_peer_device *peer_device; + struct drbd_connection *connection; int err; enum drbd_ret_code retcode; enum determine_dev_size dd; @@ -1320,17 +1469,20 @@ enum drbd_state_rv rv; struct net_conf *nc; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto finish; - mdev = adm_ctx.mdev; - conn_reconfig_start(mdev->tconn); + device = adm_ctx.device; + mutex_lock(&adm_ctx.resource->adm_mutex); + peer_device = first_peer_device(device); + connection = peer_device ? peer_device->connection : NULL; + conn_reconfig_start(connection); /* if you want to reconfigure, please tear down first */ - if (mdev->state.disk > D_DISKLESS) { + if (device->state.disk > D_DISKLESS) { retcode = ERR_DISK_CONFIGURED; goto fail; } @@ -1338,17 +1490,17 @@ * drbd_ldev_destroy is done already, we may end up here very fast, * e.g. 
if someone calls attach from the on-io-error handler, * to realize a "hot spare" feature (not that I'd recommend that) */ - wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); + wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags)); /* make sure there is no leftover from previous force-detach attempts */ - clear_bit(FORCE_DETACH, &mdev->flags); - clear_bit(WAS_IO_ERROR, &mdev->flags); - clear_bit(WAS_READ_ERROR, &mdev->flags); + clear_bit(FORCE_DETACH, &device->flags); + clear_bit(WAS_IO_ERROR, &device->flags); + clear_bit(WAS_READ_ERROR, &device->flags); /* and no leftover from previously aborted resync or verify, either */ - mdev->rs_total = 0; - mdev->rs_failed = 0; - atomic_set(&mdev->rs_pending_cnt, 0); + device->rs_total = 0; + device->rs_failed = 0; + atomic_set(&device->rs_pending_cnt, 0); /* allocation not in the IO path, drbdsetup context */ nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); @@ -1369,7 +1521,7 @@ err = disk_conf_from_attrs(new_disk_conf, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail; } @@ -1388,13 +1540,13 @@ } write_lock_irq(&global_state_lock); - retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after); + retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after); write_unlock_irq(&global_state_lock); if (retcode != NO_ERROR) goto fail; rcu_read_lock(); - nc = rcu_dereference(mdev->tconn->net_conf); + nc = rcu_dereference(connection->net_conf); if (nc) { if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) { rcu_read_unlock(); @@ -1405,9 +1557,9 @@ rcu_read_unlock(); bdev = blkdev_get_by_path(new_disk_conf->backing_dev, - FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev); + FMODE_READ | FMODE_WRITE | FMODE_EXCL, device); if (IS_ERR(bdev)) { - dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev, + drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev, PTR_ERR(bdev)); retcode = ERR_OPEN_DISK; goto fail; @@ -1425,9 +1577,9 @@ bdev = blkdev_get_by_path(new_disk_conf->meta_dev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, (new_disk_conf->meta_dev_idx < 0) ? - (void *)mdev : (void *)drbd_m_holder); + (void *)device : (void *)drbd_m_holder); if (IS_ERR(bdev)) { - dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev, + drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev, PTR_ERR(bdev)); retcode = ERR_OPEN_MD_DISK; goto fail; @@ -1451,7 +1603,7 @@ /* Read our meta data super block early. * This also sets other on-disk offsets. 
*/ - retcode = drbd_md_read(mdev, nbc); + retcode = drbd_md_read(device, nbc); if (retcode != NO_ERROR) goto fail; @@ -1461,7 +1613,7 @@ new_disk_conf->al_extents = drbd_al_extents_max(nbc); if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) { - dev_err(DEV, "max capacity %llu smaller than disk size %llu\n", + drbd_err(device, "max capacity %llu smaller than disk size %llu\n", (unsigned long long) drbd_get_max_capacity(nbc), (unsigned long long) new_disk_conf->disk_size); retcode = ERR_DISK_TOO_SMALL; @@ -1479,7 +1631,7 @@ if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) { retcode = ERR_MD_DISK_TOO_SMALL; - dev_warn(DEV, "refusing attach: md-device too small, " + drbd_warn(device, "refusing attach: md-device too small, " "at least %llu sectors needed for this meta-disk type\n", (unsigned long long) min_md_device_sectors); goto fail; @@ -1488,7 +1640,7 @@ /* Make sure the new disk is big enough * (we may currently be R_PRIMARY with no local disk...) */ if (drbd_get_max_capacity(nbc) < - drbd_get_capacity(mdev->this_bdev)) { + drbd_get_capacity(device->this_bdev)) { retcode = ERR_DISK_TOO_SMALL; goto fail; } @@ -1496,15 +1648,15 @@ nbc->known_size = drbd_get_capacity(nbc->backing_bdev); if (nbc->known_size > max_possible_sectors) { - dev_warn(DEV, "==> truncating very big lower level device " + drbd_warn(device, "==> truncating very big lower level device " "to currently maximum possible %llu sectors <==\n", (unsigned long long) max_possible_sectors); if (new_disk_conf->meta_dev_idx >= 0) - dev_warn(DEV, "==>> using internal or flexible " + drbd_warn(device, "==>> using internal or flexible " "meta data may help <<==\n"); } - drbd_suspend_io(mdev); + drbd_suspend_io(device); /* also wait for the last barrier ack. */ /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171 * We need a way to either ignore barrier acks for barriers sent before a device @@ -1512,45 +1664,45 @@ * As barriers are counted per resource, * we'd need to suspend io on all devices of a resource. */ - wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev)); + wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device)); /* and for any other previously queued work */ - drbd_flush_workqueue(mdev); + drbd_flush_workqueue(&connection->sender_work); - rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE); + rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE); retcode = rv; /* FIXME: Type mismatch. 
*/ - drbd_resume_io(mdev); + drbd_resume_io(device); if (rv < SS_SUCCESS) goto fail; - if (!get_ldev_if_state(mdev, D_ATTACHING)) + if (!get_ldev_if_state(device, D_ATTACHING)) goto force_diskless; - if (!mdev->bitmap) { - if (drbd_bm_init(mdev)) { + if (!device->bitmap) { + if (drbd_bm_init(device)) { retcode = ERR_NOMEM; goto force_diskless_dec; } } - if (mdev->state.conn < C_CONNECTED && - mdev->state.role == R_PRIMARY && - (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) { - dev_err(DEV, "Can only attach to data with current UUID=%016llX\n", - (unsigned long long)mdev->ed_uuid); + if (device->state.conn < C_CONNECTED && + device->state.role == R_PRIMARY && device->ed_uuid && + (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) { + drbd_err(device, "Can only attach to data with current UUID=%016llX\n", + (unsigned long long)device->ed_uuid); retcode = ERR_DATA_NOT_CURRENT; goto force_diskless_dec; } /* Since we are diskless, fix the activity log first... */ - if (drbd_check_al_size(mdev, new_disk_conf)) { + if (drbd_check_al_size(device, new_disk_conf)) { retcode = ERR_NOMEM; goto force_diskless_dec; } /* Prevent shrinking of consistent devices ! */ if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && - drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) { - dev_warn(DEV, "refusing to truncate a consistent device\n"); + drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) { + drbd_warn(device, "refusing to truncate a consistent device\n"); retcode = ERR_DISK_TOO_SMALL; goto force_diskless_dec; } @@ -1558,40 +1710,40 @@ /* Reset the "barriers don't work" bits here, then force meta data to * be written, to ensure we determine if barriers are supported. */ if (new_disk_conf->md_flushes) - clear_bit(MD_NO_FUA, &mdev->flags); + clear_bit(MD_NO_FUA, &device->flags); else - set_bit(MD_NO_FUA, &mdev->flags); + set_bit(MD_NO_FUA, &device->flags); /* Point of no return reached. * Devices and memory are no longer released by error cleanup below. - * now mdev takes over responsibility, and the state engine should + * now device takes over responsibility, and the state engine should * clean it up somewhere. 
*/ - D_ASSERT(mdev->ldev == NULL); - mdev->ldev = nbc; - mdev->resync = resync_lru; - mdev->rs_plan_s = new_plan; + D_ASSERT(device, device->ldev == NULL); + device->ldev = nbc; + device->resync = resync_lru; + device->rs_plan_s = new_plan; nbc = NULL; resync_lru = NULL; new_disk_conf = NULL; new_plan = NULL; - drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush); + drbd_bump_write_ordering(device->resource, device->ldev, WO_bdev_flush); - if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY)) - set_bit(CRASHED_PRIMARY, &mdev->flags); + if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY)) + set_bit(CRASHED_PRIMARY, &device->flags); else - clear_bit(CRASHED_PRIMARY, &mdev->flags); + clear_bit(CRASHED_PRIMARY, &device->flags); - if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) && - !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) - set_bit(CRASHED_PRIMARY, &mdev->flags); - - mdev->send_cnt = 0; - mdev->recv_cnt = 0; - mdev->read_cnt = 0; - mdev->writ_cnt = 0; + if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) && + !(device->state.role == R_PRIMARY && device->resource->susp_nod)) + set_bit(CRASHED_PRIMARY, &device->flags); + + device->send_cnt = 0; + device->recv_cnt = 0; + device->read_cnt = 0; + device->writ_cnt = 0; - drbd_reconsider_max_bio_size(mdev); + drbd_reconsider_max_bio_size(device, device->ldev); /* If I am currently not R_PRIMARY, * but meta data primary indicator is set, @@ -1607,50 +1759,50 @@ * so we can automatically recover from a crash of a * degraded but active "cluster" after a certain timeout. */ - clear_bit(USE_DEGR_WFC_T, &mdev->flags); - if (mdev->state.role != R_PRIMARY && - drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) && - !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) - set_bit(USE_DEGR_WFC_T, &mdev->flags); + clear_bit(USE_DEGR_WFC_T, &device->flags); + if (device->state.role != R_PRIMARY && + drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) && + !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND)) + set_bit(USE_DEGR_WFC_T, &device->flags); - dd = drbd_determine_dev_size(mdev, 0); - if (dd == dev_size_error) { + dd = drbd_determine_dev_size(device, 0, NULL); + if (dd <= DS_ERROR) { retcode = ERR_NOMEM_BITMAP; goto force_diskless_dec; - } else if (dd == grew) - set_bit(RESYNC_AFTER_NEG, &mdev->flags); + } else if (dd == DS_GREW) + set_bit(RESYNC_AFTER_NEG, &device->flags); - if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) || - (test_bit(CRASHED_PRIMARY, &mdev->flags) && - drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) { - dev_info(DEV, "Assuming that all blocks are out of sync " + if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) || + (test_bit(CRASHED_PRIMARY, &device->flags) && + drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) { + drbd_info(device, "Assuming that all blocks are out of sync " "(aka FullSync)\n"); - if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, + if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from attaching", BM_LOCKED_MASK)) { retcode = ERR_IO_MD_DISK; goto force_diskless_dec; } } else { - if (drbd_bitmap_io(mdev, &drbd_bm_read, + if (drbd_bitmap_io(device, &drbd_bm_read, "read from attaching", BM_LOCKED_MASK)) { retcode = ERR_IO_MD_DISK; goto force_diskless_dec; } } - if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev)) - drbd_suspend_al(mdev); /* IO is still suspended here... */ + if (_drbd_bm_total_weight(device) == drbd_bm_bits(device)) + drbd_suspend_al(device); /* IO is still suspended here... 
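
The nbc = NULL; resync_lru = NULL; assignments right above implement the usual goto-fail ownership rule: once the device has taken over a buffer, the local pointer is cleared so the shared error path frees only what is still locally owned. The idiom in isolation (illustrative types, not DRBD code):

    #include <stdlib.h>

    struct cfg { int x; };
    struct dev { struct cfg *cfg; };

    static int dev_configure(struct dev *d)
    {
        int err = -1;
        struct cfg *c = calloc(1, sizeof(*c));
        char *buf = malloc(4096);

        if (!c || !buf)
            goto fail;
        /* ... validation that may still 'goto fail' ... */

        d->cfg = c;      /* point of no return: ownership transferred */
        c = NULL;        /* the error path below must not free it anymore */
        err = 0;
    fail:
        free(c);         /* free(NULL) is a no-op, so this is safe either way */
        free(buf);       /* buf is a temporary and was never handed over */
        return err;
    }
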
*/ - spin_lock_irq(&mdev->tconn->req_lock); - os = drbd_read_state(mdev); + spin_lock_irq(&device->resource->req_lock); + os = drbd_read_state(device); ns = os; /* If MDF_CONSISTENT is not set go into inconsistent state, otherwise investigate MDF_WasUpToDate... If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state, otherwise into D_CONSISTENT state. */ - if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) { - if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE)) + if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) { + if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE)) ns.disk = D_CONSISTENT; else ns.disk = D_OUTDATED; @@ -1658,12 +1810,12 @@ ns.disk = D_INCONSISTENT; } - if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED)) + if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED)) ns.pdsk = D_OUTDATED; rcu_read_lock(); if (ns.disk == D_CONSISTENT && - (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE)) + (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE)) ns.disk = D_UP_TO_DATE; /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND, @@ -1671,56 +1823,57 @@ this point, because drbd_request_state() modifies these flags. */ - if (rcu_dereference(mdev->ldev->disk_conf)->al_updates) - mdev->ldev->md.flags &= ~MDF_AL_DISABLED; + if (rcu_dereference(device->ldev->disk_conf)->al_updates) + device->ldev->md.flags &= ~MDF_AL_DISABLED; else - mdev->ldev->md.flags |= MDF_AL_DISABLED; + device->ldev->md.flags |= MDF_AL_DISABLED; rcu_read_unlock(); /* In case we are C_CONNECTED postpone any decision on the new disk state after the negotiation phase. */ - if (mdev->state.conn == C_CONNECTED) { - mdev->new_state_tmp.i = ns.i; + if (device->state.conn == C_CONNECTED) { + device->new_state_tmp.i = ns.i; ns.i = os.i; ns.disk = D_NEGOTIATING; /* We expect to receive up-to-date UUIDs soon. To avoid a race in receive_state, free p_uuid while holding req_lock. I.e. 
atomic with the state change */ - kfree(mdev->p_uuid); - mdev->p_uuid = NULL; + kfree(device->p_uuid); + device->p_uuid = NULL; } - rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); - spin_unlock_irq(&mdev->tconn->req_lock); + rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL); + spin_unlock_irq(&device->resource->req_lock); if (rv < SS_SUCCESS) goto force_diskless_dec; - mod_timer(&mdev->request_timer, jiffies + HZ); + mod_timer(&device->request_timer, jiffies + HZ); - if (mdev->state.role == R_PRIMARY) - mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1; + if (device->state.role == R_PRIMARY) + device->ldev->md.uuid[UI_CURRENT] |= (u64)1; else - mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; + device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; - drbd_md_mark_dirty(mdev); - drbd_md_sync(mdev); + drbd_md_mark_dirty(device); + drbd_md_sync(device); - kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); - put_ldev(mdev); - conn_reconfig_done(mdev->tconn); - drbd_adm_finish(info, retcode); + kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE); + put_ldev(device); + conn_reconfig_done(connection); + mutex_unlock(&adm_ctx.resource->adm_mutex); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; force_diskless_dec: - put_ldev(mdev); + put_ldev(device); force_diskless: - drbd_force_state(mdev, NS(disk, D_DISKLESS)); - drbd_md_sync(mdev); + drbd_force_state(device, NS(disk, D_DISKLESS)); + drbd_md_sync(device); fail: - conn_reconfig_done(mdev->tconn); + conn_reconfig_done(connection); if (nbc) { if (nbc->backing_bdev) blkdev_put(nbc->backing_bdev, @@ -1733,32 +1886,32 @@ kfree(new_disk_conf); lc_destroy(resync_lru); kfree(new_plan); - + mutex_unlock(&adm_ctx.resource->adm_mutex); finish: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } -static int adm_detach(struct drbd_conf *mdev, int force) +static int adm_detach(struct drbd_device *device, int force) { enum drbd_state_rv retcode; int ret; if (force) { - set_bit(FORCE_DETACH, &mdev->flags); - drbd_force_state(mdev, NS(disk, D_FAILED)); + set_bit(FORCE_DETACH, &device->flags); + drbd_force_state(device, NS(disk, D_FAILED)); retcode = SS_SUCCESS; goto out; } - drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */ - drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */ - retcode = drbd_request_state(mdev, NS(disk, D_FAILED)); - drbd_md_put_buffer(mdev); + drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */ + drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */ + retcode = drbd_request_state(device, NS(disk, D_FAILED)); + drbd_md_put_buffer(device); /* D_FAILED will transition to DISKLESS. */ - ret = wait_event_interruptible(mdev->misc_wait, - mdev->state.disk != D_FAILED); - drbd_resume_io(mdev); + ret = wait_event_interruptible(device->misc_wait, + device->state.disk != D_FAILED); + drbd_resume_io(device); if ((int)retcode == (int)SS_IS_DISKLESS) retcode = SS_NOTHING_TO_DO; if (ret) @@ -1774,11 +1927,12 @@ * Only then we have finally detached. 
*/ int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; struct detach_parms parms = { }; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -1788,29 +1942,32 @@ err = detach_parms_from_attrs(&parms, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto out; } } - retcode = adm_detach(adm_ctx.mdev, parms.force_detach); + mutex_lock(&adm_ctx.resource->adm_mutex); + retcode = adm_detach(adm_ctx.device, parms.force_detach); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } -static bool conn_resync_running(struct drbd_tconn *tconn) +static bool conn_resync_running(struct drbd_connection *connection) { - struct drbd_conf *mdev; + struct drbd_peer_device *peer_device; bool rv = false; int vnr; rcu_read_lock(); - idr_for_each_entry(&tconn->volumes, mdev, vnr) { - if (mdev->state.conn == C_SYNC_SOURCE || - mdev->state.conn == C_SYNC_TARGET || - mdev->state.conn == C_PAUSED_SYNC_S || - mdev->state.conn == C_PAUSED_SYNC_T) { + idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { + struct drbd_device *device = peer_device->device; + if (device->state.conn == C_SYNC_SOURCE || + device->state.conn == C_SYNC_TARGET || + device->state.conn == C_PAUSED_SYNC_S || + device->state.conn == C_PAUSED_SYNC_T) { rv = true; break; } @@ -1820,16 +1977,17 @@ return rv; } -static bool conn_ov_running(struct drbd_tconn *tconn) +static bool conn_ov_running(struct drbd_connection *connection) { - struct drbd_conf *mdev; + struct drbd_peer_device *peer_device; bool rv = false; int vnr; rcu_read_lock(); - idr_for_each_entry(&tconn->volumes, mdev, vnr) { - if (mdev->state.conn == C_VERIFY_S || - mdev->state.conn == C_VERIFY_T) { + idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { + struct drbd_device *device = peer_device->device; + if (device->state.conn == C_VERIFY_S || + device->state.conn == C_VERIFY_T) { rv = true; break; } @@ -1840,63 +1998,65 @@ } static enum drbd_ret_code -_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf) +_check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf) { - struct drbd_conf *mdev; + struct drbd_peer_device *peer_device; int i; - if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) { - if (new_conf->wire_protocol != old_conf->wire_protocol) + if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) { + if (new_net_conf->wire_protocol != old_net_conf->wire_protocol) return ERR_NEED_APV_100; - if (new_conf->two_primaries != old_conf->two_primaries) + if (new_net_conf->two_primaries != old_net_conf->two_primaries) return ERR_NEED_APV_100; - if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg)) + if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg)) return ERR_NEED_APV_100; } - if (!new_conf->two_primaries && - conn_highest_role(tconn) == R_PRIMARY && - conn_highest_peer(tconn) == R_PRIMARY) + if (!new_net_conf->two_primaries && + conn_highest_role(connection) == R_PRIMARY && + conn_highest_peer(connection) == R_PRIMARY) 
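
conn_resync_running() and conn_ov_running() above are simple existence scans: walk every peer device of the connection and report whether any volume is in one of the interesting states. Reduced to plain C over an array (state names trimmed to the ones the check cares about; not DRBD code):

    #include <stdbool.h>
    #include <stddef.h>

    enum conn_state { ST_CONNECTED, ST_SYNC_SOURCE, ST_SYNC_TARGET,
                      ST_PAUSED_SYNC_S, ST_PAUSED_SYNC_T };

    static bool any_resync_running(const enum conn_state *vols, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            switch (vols[i]) {
            case ST_SYNC_SOURCE:
            case ST_SYNC_TARGET:
            case ST_PAUSED_SYNC_S:
            case ST_PAUSED_SYNC_T:
                return true;    /* one resyncing volume is enough */
            default:
                break;
            }
        }
        return false;
    }
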
return ERR_NEED_ALLOW_TWO_PRI; - if (new_conf->two_primaries && - (new_conf->wire_protocol != DRBD_PROT_C)) + if (new_net_conf->two_primaries && + (new_net_conf->wire_protocol != DRBD_PROT_C)) return ERR_NOT_PROTO_C; - idr_for_each_entry(&tconn->volumes, mdev, i) { - if (get_ldev(mdev)) { - enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing; - put_ldev(mdev); - if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) + idr_for_each_entry(&connection->peer_devices, peer_device, i) { + struct drbd_device *device = peer_device->device; + if (get_ldev(device)) { + enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing; + put_ldev(device); + if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) return ERR_STONITH_AND_PROT_A; } - if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data) + if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data) return ERR_DISCARD_IMPOSSIBLE; } - if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) + if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A) return ERR_CONG_NOT_PROTO_A; return NO_ERROR; } static enum drbd_ret_code -check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf) +check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf) { static enum drbd_ret_code rv; - struct drbd_conf *mdev; + struct drbd_peer_device *peer_device; int i; rcu_read_lock(); - rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf); + rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf); rcu_read_unlock(); - /* tconn->volumes protected by genl_lock() here */ - idr_for_each_entry(&tconn->volumes, mdev, i) { - if (!mdev->bitmap) { - if(drbd_bm_init(mdev)) + /* connection->peer_devices protected by genl_lock() here */ + idr_for_each_entry(&connection->peer_devices, peer_device, i) { + struct drbd_device *device = peer_device->device; + if (!device->bitmap) { + if (drbd_bm_init(device)) return ERR_NOMEM; } } @@ -1927,26 +2087,26 @@ } static enum drbd_ret_code -alloc_crypto(struct crypto *crypto, struct net_conf *new_conf) +alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf) { char hmac_name[CRYPTO_MAX_ALG_NAME]; enum drbd_ret_code rv; - rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg, + rv = alloc_hash(&crypto->csums_tfm, new_net_conf->csums_alg, ERR_CSUMS_ALG); if (rv != NO_ERROR) return rv; - rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg, + rv = alloc_hash(&crypto->verify_tfm, new_net_conf->verify_alg, ERR_VERIFY_ALG); if (rv != NO_ERROR) return rv; - rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg, + rv = alloc_hash(&crypto->integrity_tfm, new_net_conf->integrity_alg, ERR_INTEGRITY_ALG); if (rv != NO_ERROR) return rv; - if (new_conf->cram_hmac_alg[0] != 0) { + if (new_net_conf->cram_hmac_alg[0] != 0) { snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", - new_conf->cram_hmac_alg); + new_net_conf->cram_hmac_alg); rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name, ERR_AUTH_ALG); @@ -1965,135 +2125,146 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; - struct drbd_tconn *tconn; - struct net_conf *old_conf, *new_conf = NULL; + struct drbd_connection *connection; + struct net_conf *old_net_conf, *new_net_conf = NULL; int err; int ovr; /* online verify running */ int rsr; /* re-sync running */ struct 
crypto crypto = { }; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) - goto out; + goto finish; - tconn = adm_ctx.tconn; + connection = adm_ctx.connection; + mutex_lock(&adm_ctx.resource->adm_mutex); - new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); - if (!new_conf) { + new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); + if (!new_net_conf) { retcode = ERR_NOMEM; goto out; } - conn_reconfig_start(tconn); + conn_reconfig_start(connection); - mutex_lock(&tconn->data.mutex); - mutex_lock(&tconn->conf_update); - old_conf = tconn->net_conf; + mutex_lock(&connection->data.mutex); + mutex_lock(&connection->resource->conf_update); + old_net_conf = connection->net_conf; - if (!old_conf) { - drbd_msg_put_info("net conf missing, try connect"); + if (!old_net_conf) { + drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect"); retcode = ERR_INVALID_REQUEST; goto fail; } - *new_conf = *old_conf; + *new_net_conf = *old_net_conf; if (should_set_defaults(info)) - set_net_conf_defaults(new_conf); + set_net_conf_defaults(new_net_conf); - err = net_conf_from_attrs_for_change(new_conf, info); + err = net_conf_from_attrs_for_change(new_net_conf, info); if (err && err != -ENOMSG) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail; } - retcode = check_net_options(tconn, new_conf); + retcode = check_net_options(connection, new_net_conf); if (retcode != NO_ERROR) goto fail; /* re-sync running */ - rsr = conn_resync_running(tconn); - if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) { + rsr = conn_resync_running(connection); + if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) { retcode = ERR_CSUMS_RESYNC_RUNNING; goto fail; } /* online verify running */ - ovr = conn_ov_running(tconn); - if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) { + ovr = conn_ov_running(connection); + if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) { retcode = ERR_VERIFY_RUNNING; goto fail; } - retcode = alloc_crypto(&crypto, new_conf); + retcode = alloc_crypto(&crypto, new_net_conf); if (retcode != NO_ERROR) goto fail; - rcu_assign_pointer(tconn->net_conf, new_conf); + rcu_assign_pointer(connection->net_conf, new_net_conf); if (!rsr) { - crypto_free_hash(tconn->csums_tfm); - tconn->csums_tfm = crypto.csums_tfm; + crypto_free_hash(connection->csums_tfm); + connection->csums_tfm = crypto.csums_tfm; crypto.csums_tfm = NULL; } if (!ovr) { - crypto_free_hash(tconn->verify_tfm); - tconn->verify_tfm = crypto.verify_tfm; + crypto_free_hash(connection->verify_tfm); + connection->verify_tfm = crypto.verify_tfm; crypto.verify_tfm = NULL; } - crypto_free_hash(tconn->integrity_tfm); - tconn->integrity_tfm = crypto.integrity_tfm; - if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100) - /* Do this without trying to take tconn->data.mutex again. */ - __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE); + crypto_free_hash(connection->integrity_tfm); + connection->integrity_tfm = crypto.integrity_tfm; + if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100) + /* Do this without trying to take connection->data.mutex again. 
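
The net-options update below follows DRBD's copy/modify/publish discipline: serialize writers on a mutex, duplicate the old struct, apply the requested changes, rcu_assign_pointer() the new copy in, and only free the old one after synchronize_rcu(), so lockless readers always see a complete configuration. The same pattern can be reproduced in userspace with liburcu (a sketch; assumes liburcu is installed, every reader thread has called rcu_register_thread(), and the pointer was initialized once at startup — struct and field names are illustrative):

    #include <urcu.h>        /* liburcu; build with: cc file.c -lurcu */
    #include <pthread.h>
    #include <stdlib.h>

    struct net_conf { int timeout; int ko_count; };

    static struct net_conf *net_conf;             /* RCU-protected pointer */
    static pthread_mutex_t conf_update = PTHREAD_MUTEX_INITIALIZER;

    static int set_timeout(int timeout)
    {
        struct net_conf *old_conf, *new_conf = malloc(sizeof(*new_conf));

        if (!new_conf)
            return -1;
        pthread_mutex_lock(&conf_update);         /* serialize writers */
        old_conf = net_conf;
        *new_conf = *old_conf;                    /* start from current settings */
        new_conf->timeout = timeout;
        rcu_assign_pointer(net_conf, new_conf);   /* publish atomically */
        pthread_mutex_unlock(&conf_update);

        synchronize_rcu();                        /* wait out pre-existing readers */
        free(old_conf);                           /* nobody can still see it */
        return 0;
    }

    static int read_timeout(void)                 /* lockless reader */
    {
        int t;
        rcu_read_lock();
        t = rcu_dereference(net_conf)->timeout;
        rcu_read_unlock();
        return t;
    }
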
*/ + __drbd_send_protocol(connection, P_PROTOCOL_UPDATE); - crypto_free_hash(tconn->cram_hmac_tfm); - tconn->cram_hmac_tfm = crypto.cram_hmac_tfm; + crypto_free_hash(connection->cram_hmac_tfm); + connection->cram_hmac_tfm = crypto.cram_hmac_tfm; - mutex_unlock(&tconn->conf_update); - mutex_unlock(&tconn->data.mutex); + mutex_unlock(&connection->resource->conf_update); + mutex_unlock(&connection->data.mutex); synchronize_rcu(); - kfree(old_conf); + kfree(old_net_conf); - if (tconn->cstate >= C_WF_REPORT_PARAMS) - drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn))); + if (connection->cstate >= C_WF_REPORT_PARAMS) { + struct drbd_peer_device *peer_device; + int vnr; + + idr_for_each_entry(&connection->peer_devices, peer_device, vnr) + drbd_send_sync_param(peer_device); + } goto done; fail: - mutex_unlock(&tconn->conf_update); - mutex_unlock(&tconn->data.mutex); + mutex_unlock(&connection->resource->conf_update); + mutex_unlock(&connection->data.mutex); free_crypto(&crypto); - kfree(new_conf); + kfree(new_net_conf); done: - conn_reconfig_done(tconn); + conn_reconfig_done(connection); out: - drbd_adm_finish(info, retcode); + mutex_unlock(&adm_ctx.resource->adm_mutex); + finish: + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) { - struct drbd_conf *mdev; - struct net_conf *old_conf, *new_conf = NULL; + struct drbd_config_context adm_ctx; + struct drbd_peer_device *peer_device; + struct net_conf *old_net_conf, *new_net_conf = NULL; struct crypto crypto = { }; - struct drbd_tconn *tconn; + struct drbd_resource *resource; + struct drbd_connection *connection; enum drbd_ret_code retcode; int i; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) { - drbd_msg_put_info("connection endpoint(s) missing"); + drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing"); retcode = ERR_INVALID_REQUEST; goto out; } @@ -2101,106 +2272,114 @@ /* No need for _rcu here. All reconfiguration is * strictly serialized on genl_lock(). 
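
drbd_adm_connect() (continued just below) first walks every existing connection of every resource and rejects the new endpoints if either address is already taken; two address blobs only match when both the stored length and the bytes agree. The check in isolation (reduced types, not DRBD code):

    #include <stdbool.h>
    #include <string.h>

    struct endpoint { int len; unsigned char addr[128]; };

    static bool endpoint_eq(const struct endpoint *a, const struct endpoint *b)
    {
        return a->len == b->len && memcmp(a->addr, b->addr, a->len) == 0;
    }

    /* reject if my_addr or peer_addr collides with any existing connection */
    static bool endpoints_in_use(const struct endpoint conns[][2], size_t n,
                                 const struct endpoint *my, const struct endpoint *peer)
    {
        for (size_t i = 0; i < n; i++) {
            if (endpoint_eq(&conns[i][0], my))   return true;  /* ERR_LOCAL_ADDR */
            if (endpoint_eq(&conns[i][1], peer)) return true;  /* ERR_PEER_ADDR */
        }
        return false;
    }
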
We are protected against * concurrent reconfiguration/addition/deletion */ - list_for_each_entry(tconn, &drbd_tconns, all_tconn) { - if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len && - !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) { - retcode = ERR_LOCAL_ADDR; - goto out; - } + for_each_resource(resource, &drbd_resources) { + for_each_connection(connection, resource) { + if (nla_len(adm_ctx.my_addr) == connection->my_addr_len && + !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr, + connection->my_addr_len)) { + retcode = ERR_LOCAL_ADDR; + goto out; + } - if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len && - !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) { - retcode = ERR_PEER_ADDR; - goto out; + if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len && + !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr, + connection->peer_addr_len)) { + retcode = ERR_PEER_ADDR; + goto out; + } } } - tconn = adm_ctx.tconn; - conn_reconfig_start(tconn); + mutex_lock(&adm_ctx.resource->adm_mutex); + connection = first_connection(adm_ctx.resource); + conn_reconfig_start(connection); - if (tconn->cstate > C_STANDALONE) { + if (connection->cstate > C_STANDALONE) { retcode = ERR_NET_CONFIGURED; goto fail; } /* allocation not in the IO path, drbdsetup / netlink process context */ - new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL); - if (!new_conf) { + new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL); + if (!new_net_conf) { retcode = ERR_NOMEM; goto fail; } - set_net_conf_defaults(new_conf); + set_net_conf_defaults(new_net_conf); - err = net_conf_from_attrs(new_conf, info); + err = net_conf_from_attrs(new_net_conf, info); if (err && err != -ENOMSG) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail; } - retcode = check_net_options(tconn, new_conf); + retcode = check_net_options(connection, new_net_conf); if (retcode != NO_ERROR) goto fail; - retcode = alloc_crypto(&crypto, new_conf); + retcode = alloc_crypto(&crypto, new_net_conf); if (retcode != NO_ERROR) goto fail; - ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0; + ((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0; - conn_flush_workqueue(tconn); + drbd_flush_workqueue(&connection->sender_work); - mutex_lock(&tconn->conf_update); - old_conf = tconn->net_conf; - if (old_conf) { + mutex_lock(&adm_ctx.resource->conf_update); + old_net_conf = connection->net_conf; + if (old_net_conf) { retcode = ERR_NET_CONFIGURED; - mutex_unlock(&tconn->conf_update); + mutex_unlock(&adm_ctx.resource->conf_update); goto fail; } - rcu_assign_pointer(tconn->net_conf, new_conf); + rcu_assign_pointer(connection->net_conf, new_net_conf); - conn_free_crypto(tconn); - tconn->cram_hmac_tfm = crypto.cram_hmac_tfm; - tconn->integrity_tfm = crypto.integrity_tfm; - tconn->csums_tfm = crypto.csums_tfm; - tconn->verify_tfm = crypto.verify_tfm; + conn_free_crypto(connection); + connection->cram_hmac_tfm = crypto.cram_hmac_tfm; + connection->integrity_tfm = crypto.integrity_tfm; + connection->csums_tfm = crypto.csums_tfm; + connection->verify_tfm = crypto.verify_tfm; - tconn->my_addr_len = nla_len(adm_ctx.my_addr); - memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len); - tconn->peer_addr_len = nla_len(adm_ctx.peer_addr); - memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len); + connection->my_addr_len = 
nla_len(adm_ctx.my_addr); + memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len); + connection->peer_addr_len = nla_len(adm_ctx.peer_addr); + memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len); - mutex_unlock(&tconn->conf_update); + mutex_unlock(&adm_ctx.resource->conf_update); rcu_read_lock(); - idr_for_each_entry(&tconn->volumes, mdev, i) { - mdev->send_cnt = 0; - mdev->recv_cnt = 0; + idr_for_each_entry(&connection->peer_devices, peer_device, i) { + struct drbd_device *device = peer_device->device; + device->send_cnt = 0; + device->recv_cnt = 0; } rcu_read_unlock(); - retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE); + retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE); - conn_reconfig_done(tconn); - drbd_adm_finish(info, retcode); + conn_reconfig_done(connection); + mutex_unlock(&adm_ctx.resource->adm_mutex); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; fail: free_crypto(&crypto); - kfree(new_conf); + kfree(new_net_conf); - conn_reconfig_done(tconn); + conn_reconfig_done(connection); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } -static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force) +static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force) { enum drbd_state_rv rv; - rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), + rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), force ? CS_HARD : 0); switch (rv) { @@ -2210,18 +2389,18 @@ return SS_SUCCESS; case SS_PRIMARY_NOP: /* Our state checking code wants to see the peer outdated. */ - rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0); + rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0); if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */ - rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_VERBOSE); + rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE); break; case SS_CW_FAILED_BY_PEER: /* The peer probably wants to see us outdated. */ - rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, + rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, disk, D_OUTDATED), 0); if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) { - rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), + rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); } break; @@ -2235,18 +2414,18 @@ * The state handling only uses drbd_thread_stop_nowait(), * we want to really wait here until the receiver is no more. */ - drbd_thread_stop(&adm_ctx.tconn->receiver); + drbd_thread_stop(&connection->receiver); /* Race breaker. This additional state change request may be * necessary, if this was a forced disconnect during a receiver * restart. We may have "killed" the receiver thread just - * after drbdd_init() returned. Typically, we should be + * after drbd_receiver() returned. Typically, we should be * C_STANDALONE already, now, and this becomes a no-op. 
*/ - rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE), + rv2 = conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD); if (rv2 < SS_SUCCESS) - conn_err(tconn, + drbd_err(connection, "unexpected rv2=%d in conn_try_disconnect()\n", rv2); } @@ -2255,106 +2434,114 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct disconnect_parms parms; - struct drbd_tconn *tconn; + struct drbd_connection *connection; enum drbd_state_rv rv; enum drbd_ret_code retcode; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto fail; - tconn = adm_ctx.tconn; + connection = adm_ctx.connection; memset(&parms, 0, sizeof(parms)); if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) { err = disconnect_parms_from_attrs(&parms, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail; } } - rv = conn_try_disconnect(tconn, parms.force_disconnect); + mutex_lock(&adm_ctx.resource->adm_mutex); + rv = conn_try_disconnect(connection, parms.force_disconnect); if (rv < SS_SUCCESS) retcode = rv; /* FIXME: Type mismatch. */ else retcode = NO_ERROR; + mutex_unlock(&adm_ctx.resource->adm_mutex); fail: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } -void resync_after_online_grow(struct drbd_conf *mdev) +void resync_after_online_grow(struct drbd_device *device) { int iass; /* I am sync source */ - dev_info(DEV, "Resync of new storage after online grow\n"); - if (mdev->state.role != mdev->state.peer) - iass = (mdev->state.role == R_PRIMARY); + drbd_info(device, "Resync of new storage after online grow\n"); + if (device->state.role != device->state.peer) + iass = (device->state.role == R_PRIMARY); else - iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags); + iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags); if (iass) - drbd_start_resync(mdev, C_SYNC_SOURCE); + drbd_start_resync(device, C_SYNC_SOURCE); else - _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE); + _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE); } int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct disk_conf *old_disk_conf, *new_disk_conf = NULL; struct resize_parms rs; - struct drbd_conf *mdev; + struct drbd_device *device; enum drbd_ret_code retcode; enum determine_dev_size dd; + bool change_al_layout = false; enum dds_flags ddsf; sector_t u_size; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) + goto finish; + + mutex_lock(&adm_ctx.resource->adm_mutex); + device = adm_ctx.device; + if (!get_ldev(device)) { + retcode = ERR_NO_DISK; goto fail; + } memset(&rs, 0, sizeof(struct resize_parms)); + rs.al_stripes = device->ldev->md.al_stripes; + rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4; if (info->attrs[DRBD_NLA_RESIZE_PARMS]) { err = resize_parms_from_attrs(&rs, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); - goto fail; + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); + goto 
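
resync_after_online_grow() above decides which side resyncs the newly grown area: if the roles differ, the Primary becomes sync source; on a role tie, a connection flag breaks it. As a pure function (a sketch of the decision only):

    #include <stdbool.h>

    enum role { R_SECONDARY, R_PRIMARY };

    /* returns true if the local node should be sync source ("iass") */
    static bool i_am_sync_source(enum role self, enum role peer, bool resolve_conflicts)
    {
        if (self != peer)
            return self == R_PRIMARY;
        return resolve_conflicts;   /* tie-break taken from the connection flags */
    }
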
fail_ldev; } } - mdev = adm_ctx.mdev; - if (mdev->state.conn > C_CONNECTED) { + if (device->state.conn > C_CONNECTED) { retcode = ERR_RESIZE_RESYNC; - goto fail; + goto fail_ldev; } - if (mdev->state.role == R_SECONDARY && - mdev->state.peer == R_SECONDARY) { + if (device->state.role == R_SECONDARY && + device->state.peer == R_SECONDARY) { retcode = ERR_NO_PRIMARY; - goto fail; - } - - if (!get_ldev(mdev)) { - retcode = ERR_NO_DISK; - goto fail; + goto fail_ldev; } - if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) { + if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) { retcode = ERR_NEED_APV_93; goto fail_ldev; } rcu_read_lock(); - u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size; + u_size = rcu_dereference(device->ldev->disk_conf)->disk_size; rcu_read_unlock(); if (u_size != (sector_t)rs.resize_size) { new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL); @@ -2364,222 +2551,277 @@ } } - if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) - mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); + if (device->ldev->md.al_stripes != rs.al_stripes || + device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) { + u32 al_size_k = rs.al_stripes * rs.al_stripe_size; + + if (al_size_k > (16 * 1024 * 1024)) { + retcode = ERR_MD_LAYOUT_TOO_BIG; + goto fail_ldev; + } + + if (al_size_k < MD_32kB_SECT/2) { + retcode = ERR_MD_LAYOUT_TOO_SMALL; + goto fail_ldev; + } + + if (device->state.conn != C_CONNECTED && !rs.resize_force) { + retcode = ERR_MD_LAYOUT_CONNECTED; + goto fail_ldev; + } + + change_al_layout = true; + } + + if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) + device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev); if (new_disk_conf) { - mutex_lock(&mdev->tconn->conf_update); - old_disk_conf = mdev->ldev->disk_conf; + mutex_lock(&device->resource->conf_update); + old_disk_conf = device->ldev->disk_conf; *new_disk_conf = *old_disk_conf; new_disk_conf->disk_size = (sector_t)rs.resize_size; - rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf); - mutex_unlock(&mdev->tconn->conf_update); + rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); + mutex_unlock(&device->resource->conf_update); synchronize_rcu(); kfree(old_disk_conf); } ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0); - dd = drbd_determine_dev_size(mdev, ddsf); - drbd_md_sync(mdev); - put_ldev(mdev); - if (dd == dev_size_error) { + dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? 
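
The new activity-log layout checks above bound stripes × stripe-size and refuse an offline layout change without force. The same gatekeeping as a standalone function (the lower bound is parameterized because DRBD's MD_32kB_SECT constant is not reproduced here):

    #include <stdbool.h>
    #include <stdint.h>

    enum layout_err { LAYOUT_OK, LAYOUT_TOO_BIG, LAYOUT_TOO_SMALL,
                      LAYOUT_NEED_CONN_OR_FORCE };

    /* min_kb stands in for the patch's MD_32kB_SECT/2 lower bound */
    static enum layout_err check_al_layout(uint32_t stripes, uint32_t stripe_size_kb,
                                           uint32_t min_kb, bool connected, bool force)
    {
        uint64_t al_size_kb = (uint64_t)stripes * stripe_size_kb;

        if (al_size_kb > 16u * 1024 * 1024)   /* cap from the patch: 16 GiB of AL */
            return LAYOUT_TOO_BIG;
        if (al_size_kb < min_kb)
            return LAYOUT_TOO_SMALL;
        if (!connected && !force)             /* offline change needs --force */
            return LAYOUT_NEED_CONN_OR_FORCE;
        return LAYOUT_OK;
    }
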
&rs : NULL); + drbd_md_sync(device); + put_ldev(device); + if (dd == DS_ERROR) { retcode = ERR_NOMEM_BITMAP; goto fail; + } else if (dd == DS_ERROR_SPACE_MD) { + retcode = ERR_MD_LAYOUT_NO_FIT; + goto fail; + } else if (dd == DS_ERROR_SHRINK) { + retcode = ERR_IMPLICIT_SHRINK; + goto fail; } - if (mdev->state.conn == C_CONNECTED) { - if (dd == grew) - set_bit(RESIZE_PENDING, &mdev->flags); + if (device->state.conn == C_CONNECTED) { + if (dd == DS_GREW) + set_bit(RESIZE_PENDING, &device->flags); - drbd_send_uuids(mdev); - drbd_send_sizes(mdev, 1, ddsf); + drbd_send_uuids(first_peer_device(device)); + drbd_send_sizes(first_peer_device(device), 1, ddsf); } fail: - drbd_adm_finish(info, retcode); + mutex_unlock(&adm_ctx.resource->adm_mutex); + finish: + drbd_adm_finish(&adm_ctx, info, retcode); return 0; fail_ldev: - put_ldev(mdev); + put_ldev(device); goto fail; } int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; - struct drbd_tconn *tconn; struct res_opts res_opts; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto fail; - tconn = adm_ctx.tconn; - res_opts = tconn->res_opts; + res_opts = adm_ctx.resource->res_opts; if (should_set_defaults(info)) set_res_opts_defaults(&res_opts); err = res_opts_from_attrs(&res_opts, info); if (err && err != -ENOMSG) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail; } - err = set_resource_options(tconn, &res_opts); + mutex_lock(&adm_ctx.resource->adm_mutex); + err = set_resource_options(adm_ctx.resource, &res_opts); if (err) { retcode = ERR_INVALID_REQUEST; if (err == -ENOMEM) retcode = ERR_NOMEM; } + mutex_unlock(&adm_ctx.resource->adm_mutex); fail: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info) { - struct drbd_conf *mdev; + struct drbd_config_context adm_ctx; + struct drbd_device *device; int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */ - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; - mdev = adm_ctx.mdev; + device = adm_ctx.device; + if (!get_ldev(device)) { + retcode = ERR_NO_DISK; + goto out; + } + + mutex_lock(&adm_ctx.resource->adm_mutex); /* If there is still bitmap IO pending, probably because of a previous * resync just being finished, wait for it before requesting a new resync. * Also wait for it's after_state_ch(). */ - drbd_suspend_io(mdev); - wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); - drbd_flush_workqueue(mdev); + drbd_suspend_io(device); + wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags)); + drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work); /* If we happen to be C_STANDALONE R_SECONDARY, just change to * D_INCONSISTENT, and set all bits in the bitmap. Otherwise, * try to start a resync handshake as sync target for full sync. 
*/ - if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_SECONDARY) { - retcode = drbd_request_state(mdev, NS(disk, D_INCONSISTENT)); + if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) { + retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT)); if (retcode >= SS_SUCCESS) { - if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, + if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from invalidate", BM_LOCKED_MASK)) retcode = ERR_IO_MD_DISK; } } else - retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T)); - drbd_resume_io(mdev); - + retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T)); + drbd_resume_io(device); + mutex_unlock(&adm_ctx.resource->adm_mutex); + put_ldev(device); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info, union drbd_state mask, union drbd_state val) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; - retcode = drbd_request_state(adm_ctx.mdev, mask, val); + mutex_lock(&adm_ctx.resource->adm_mutex); + retcode = drbd_request_state(adm_ctx.device, mask, val); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } -static int drbd_bmio_set_susp_al(struct drbd_conf *mdev) +static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local) { int rv; - rv = drbd_bmio_set_n_write(mdev); - drbd_suspend_al(mdev); + rv = drbd_bmio_set_n_write(device); + drbd_suspend_al(device); return rv; } int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; int retcode; /* drbd_ret_code, drbd_state_rv */ - struct drbd_conf *mdev; + struct drbd_device *device; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; - mdev = adm_ctx.mdev; + device = adm_ctx.device; + if (!get_ldev(device)) { + retcode = ERR_NO_DISK; + goto out; + } + + mutex_lock(&adm_ctx.resource->adm_mutex); /* If there is still bitmap IO pending, probably because of a previous * resync just being finished, wait for it before requesting a new resync. * Also wait for it's after_state_ch(). */ - drbd_suspend_io(mdev); - wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); - drbd_flush_workqueue(mdev); + drbd_suspend_io(device); + wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags)); + drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work); /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits * in the bitmap. Otherwise, try to start a resync handshake * as sync source for full sync. */ - if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_PRIMARY) { + if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) { /* The peer will get a resync upon connect anyways. Just make that into a full resync. 
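
The drbd_bitmap_io() calls above take a function pointer plus a human-readable reason string, so one locked whole-bitmap entry point can serve "set all bits", "read from disk", and compositions like drbd_bmio_set_susp_al() (set everything, then park the activity log). The shape of that interface, stubbed (not DRBD code):

    #include <stdio.h>

    struct device;                                   /* opaque in this sketch */
    typedef int (*bm_io_fn)(struct device *);

    static int bm_set_all(struct device *d) { (void)d; return 0; }   /* stub */

    /* one whole-bitmap operation, logging the caller-supplied reason;
     * a real implementation would lock the bitmap around the callback */
    static int bitmap_io(struct device *d, bm_io_fn io, const char *why)
    {
        fprintf(stderr, "bitmap: %s\n", why);
        return io(d);
    }

    /* composition in the style of drbd_bmio_set_susp_al(): do the bitmap
     * work first, then apply the side effect, and report the bitmap result */
    static int set_all_and_suspend_al(struct device *d)
    {
        int rv = bm_set_all(d);
        /* suspend_al(d) would go here */
        return rv;
    }
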
*/ - retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT)); + retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT)); if (retcode >= SS_SUCCESS) { - if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al, + if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al, "set_n_write from invalidate_peer", BM_LOCKED_SET_ALLOWED)) retcode = ERR_IO_MD_DISK; } } else - retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S)); - drbd_resume_io(mdev); - + retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S)); + drbd_resume_io(device); + mutex_unlock(&adm_ctx.resource->adm_mutex); + put_ldev(device); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; - if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO) + mutex_lock(&adm_ctx.resource->adm_mutex); + if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO) retcode = ERR_PAUSE_IS_SET; + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; union drbd_dev_state s; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; - if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) { - s = adm_ctx.mdev->state; + mutex_lock(&adm_ctx.resource->adm_mutex); + if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) { + s = adm_ctx.device->state; if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) { retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP : s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR; @@ -2587,9 +2829,9 @@ retcode = ERR_PAUSE_IS_CLEAR; } } - + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -2600,32 +2842,34 @@ int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info) { - struct drbd_conf *mdev; + struct drbd_config_context adm_ctx; + struct drbd_device *device; int retcode; /* enum drbd_ret_code rsp. 
enum drbd_state_rv */ - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; - mdev = adm_ctx.mdev; - if (test_bit(NEW_CUR_UUID, &mdev->flags)) { - drbd_uuid_new_current(mdev); - clear_bit(NEW_CUR_UUID, &mdev->flags); + mutex_lock(&adm_ctx.resource->adm_mutex); + device = adm_ctx.device; + if (test_bit(NEW_CUR_UUID, &device->flags)) { + drbd_uuid_new_current(device); + clear_bit(NEW_CUR_UUID, &device->flags); } - drbd_suspend_io(mdev); - retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0)); + drbd_suspend_io(device); + retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0)); if (retcode == SS_SUCCESS) { - if (mdev->state.conn < C_CONNECTED) - tl_clear(mdev->tconn); - if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED) - tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO); + if (device->state.conn < C_CONNECTED) + tl_clear(first_peer_device(device)->connection); + if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED) + tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO); } - drbd_resume_io(mdev); - + drbd_resume_io(device); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -2634,23 +2878,28 @@ return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED)); } -int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr) +static int nla_put_drbd_cfg_context(struct sk_buff *skb, + struct drbd_resource *resource, + struct drbd_connection *connection, + struct drbd_device *device) { struct nlattr *nla; nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT); if (!nla) goto nla_put_failure; - if (vnr != VOLUME_UNSPECIFIED && - nla_put_u32(skb, T_ctx_volume, vnr)) - goto nla_put_failure; - if (nla_put_string(skb, T_ctx_resource_name, tconn->name)) - goto nla_put_failure; - if (tconn->my_addr_len && - nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr)) + if (device && + nla_put_u32(skb, T_ctx_volume, device->vnr)) goto nla_put_failure; - if (tconn->peer_addr_len && - nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr)) + if (nla_put_string(skb, T_ctx_resource_name, resource->name)) goto nla_put_failure; + if (connection) { + if (connection->my_addr_len && + nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr)) + goto nla_put_failure; + if (connection->peer_addr_len && + nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr)) + goto nla_put_failure; + } nla_nest_end(skb, nla); return 0; @@ -2660,11 +2909,23 @@ return -EMSGSIZE; } -int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev, +/* + * Return the connection of @resource if @resource has exactly one connection. 
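
the_only_connection() (defined just below) needs "exactly one element": with a kernel-style circular list that is !list_empty(h) && h->next->next == h, the same test the kernel exports as list_is_singular(). Self-contained:

    #include <stdbool.h>

    struct list_head { struct list_head *next, *prev; };  /* kernel-style circular list */

    static bool list_empty(const struct list_head *h) { return h->next == h; }

    /* true iff the list holds exactly one entry */
    static bool list_is_singular(const struct list_head *h)
    {
        return !list_empty(h) && h->next->next == h;
    }
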
+ */ +static struct drbd_connection *the_only_connection(struct drbd_resource *resource) +{ + struct list_head *connections = &resource->connections; + + if (list_empty(connections) || connections->next->next != connections) + return NULL; + return list_first_entry(&resource->connections, struct drbd_connection, connections); +} + +static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device, const struct sib_info *sib) { + struct drbd_resource *resource = device->resource; struct state_info *si = NULL; /* for sizeof(si->member); */ - struct net_conf *nc; struct nlattr *nla; int got_ldev; int err = 0; @@ -2683,24 +2944,30 @@ * always in the context of the receiving process */ exclude_sensitive = sib || !capable(CAP_SYS_ADMIN); - got_ldev = get_ldev(mdev); + got_ldev = get_ldev(device); /* We need to add connection name and volume number information still. * Minor number is in drbd_genlmsghdr. */ - if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr)) + if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device)) goto nla_put_failure; - if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive)) + if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive)) goto nla_put_failure; rcu_read_lock(); - if (got_ldev) - if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive)) - goto nla_put_failure; + if (got_ldev) { + struct disk_conf *disk_conf; + + disk_conf = rcu_dereference(device->ldev->disk_conf); + err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive); + } + if (!err) { + struct net_conf *nc; - nc = rcu_dereference(mdev->tconn->net_conf); - if (nc) - err = net_conf_to_skb(skb, nc, exclude_sensitive); + nc = rcu_dereference(first_peer_device(device)->connection->net_conf); + if (nc) + err = net_conf_to_skb(skb, nc, exclude_sensitive); + } rcu_read_unlock(); if (err) goto nla_put_failure; @@ -2709,38 +2976,38 @@ if (!nla) goto nla_put_failure; if (nla_put_u32(skb, T_sib_reason, sib ? 
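
nla_put_drbd_cfg_context() above shows the standard nest/put/commit flow for netlink attributes, with rollback on overflow. The same flow is available from userspace via libnl-3; a sketch with made-up attribute numbers (error handling shortened):

    #include <netlink/netlink.h>   /* libnl-3 */
    #include <netlink/msg.h>
    #include <netlink/attr.h>
    #include <stdint.h>
    #include <errno.h>

    /* attribute type numbers are invented for the sketch */
    enum { CTX_NESTED = 1, CTX_VOLUME = 1, CTX_RESOURCE_NAME = 2 };

    static int put_cfg_context(struct nl_msg *msg, const char *name, uint32_t vol)
    {
        struct nlattr *nest = nla_nest_start(msg, CTX_NESTED);

        if (!nest)
            goto failure;
        if (nla_put_u32(msg, CTX_VOLUME, vol) ||
            nla_put_string(msg, CTX_RESOURCE_NAME, name))
            goto failure;
        nla_nest_end(msg, nest);            /* commit the nested attribute */
        return 0;

    failure:
        if (nest)
            nla_nest_cancel(msg, nest);     /* roll the partial nest back */
        return -EMSGSIZE;
    }
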
sib->sib_reason : SIB_GET_STATUS_REPLY) || - nla_put_u32(skb, T_current_state, mdev->state.i) || - nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) || - nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) || - nla_put_u64(skb, T_send_cnt, mdev->send_cnt) || - nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) || - nla_put_u64(skb, T_read_cnt, mdev->read_cnt) || - nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) || - nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) || - nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) || - nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) || - nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) || - nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt))) + nla_put_u32(skb, T_current_state, device->state.i) || + nla_put_u64(skb, T_ed_uuid, device->ed_uuid) || + nla_put_u64(skb, T_capacity, drbd_get_capacity(device->this_bdev)) || + nla_put_u64(skb, T_send_cnt, device->send_cnt) || + nla_put_u64(skb, T_recv_cnt, device->recv_cnt) || + nla_put_u64(skb, T_read_cnt, device->read_cnt) || + nla_put_u64(skb, T_writ_cnt, device->writ_cnt) || + nla_put_u64(skb, T_al_writ_cnt, device->al_writ_cnt) || + nla_put_u64(skb, T_bm_writ_cnt, device->bm_writ_cnt) || + nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) || + nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) || + nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt))) goto nla_put_failure; if (got_ldev) { int err; - spin_lock_irq(&mdev->ldev->md.uuid_lock); - err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid); - spin_unlock_irq(&mdev->ldev->md.uuid_lock); + spin_lock_irq(&device->ldev->md.uuid_lock); + err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid); + spin_unlock_irq(&device->ldev->md.uuid_lock); if (err) goto nla_put_failure; - if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) || - nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) || - nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev))) + if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) || + nla_put_u64(skb, T_bits_total, drbd_bm_bits(device)) || + nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(device))) goto nla_put_failure; - if (C_SYNC_SOURCE <= mdev->state.conn && - C_PAUSED_SYNC_T >= mdev->state.conn) { - if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) || - nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed)) + if (C_SYNC_SOURCE <= device->state.conn && + C_PAUSED_SYNC_T >= device->state.conn) { + if (nla_put_u64(skb, T_bits_rs_total, device->rs_total) || + nla_put_u64(skb, T_bits_rs_failed, device->rs_failed)) goto nla_put_failure; } } @@ -2772,47 +3039,49 @@ nla_put_failure: err = -EMSGSIZE; if (got_ldev) - put_ldev(mdev); + put_ldev(device); return err; } int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; - err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL); + err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL); if (err) { nlmsg_free(adm_ctx.reply_skb); return err; } out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } -int get_one_status(struct sk_buff *skb, struct netlink_callback *cb) +static int 
get_one_status(struct sk_buff *skb, struct netlink_callback *cb) { - struct drbd_conf *mdev; + struct drbd_device *device; struct drbd_genlmsghdr *dh; - struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0]; - struct drbd_tconn *tconn = NULL; - struct drbd_tconn *tmp; + struct drbd_resource *pos = (struct drbd_resource *)cb->args[0]; + struct drbd_resource *resource = NULL; + struct drbd_resource *tmp; unsigned volume = cb->args[1]; /* Open coded, deferred, iteration: - * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) { - * idr_for_each_entry(&tconn->volumes, mdev, i) { + * for_each_resource_safe(resource, tmp, &drbd_resources) { + * connection = "first connection of resource or undefined"; + * idr_for_each_entry(&resource->devices, device, i) { * ... * } * } - * where tconn is cb->args[0]; + * where resource is cb->args[0]; * and i is cb->args[1]; * * cb->args[2] indicates if we shall loop over all resources, @@ -2821,44 +3090,44 @@ * This may miss entries inserted after this dump started, * or entries deleted before they are reached. * - * We need to make sure the mdev won't disappear while + * We need to make sure the device won't disappear while * we are looking at it, and revalidate our iterators * on each iteration. */ - /* synchronize with conn_create()/conn_destroy() */ + /* synchronize with conn_create()/drbd_destroy_connection() */ rcu_read_lock(); /* revalidate iterator position */ - list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) { + for_each_resource_rcu(tmp, &drbd_resources) { if (pos == NULL) { /* first iteration */ pos = tmp; - tconn = pos; + resource = pos; break; } if (tmp == pos) { - tconn = pos; + resource = pos; break; } } - if (tconn) { -next_tconn: - mdev = idr_get_next(&tconn->volumes, &volume); - if (!mdev) { - /* No more volumes to dump on this tconn. - * Advance tconn iterator. */ - pos = list_entry_rcu(tconn->all_tconn.next, - struct drbd_tconn, all_tconn); - /* Did we dump any volume on this tconn yet? */ + if (resource) { +next_resource: + device = idr_get_next(&resource->devices, &volume); + if (!device) { + /* No more volumes to dump on this resource. + * Advance resource iterator. */ + pos = list_entry_rcu(resource->resources.next, + struct drbd_resource, resources); + /* Did we dump any volume of this resource yet? */ if (volume != 0) { /* If we reached the end of the list, * or only a single resource dump was requested, * we are done. */ - if (&pos->all_tconn == &drbd_tconns || cb->args[2]) + if (&pos->resources == &drbd_resources || cb->args[2]) goto out; volume = 0; - tconn = pos; - goto next_tconn; + resource = pos; + goto next_resource; } } @@ -2868,43 +3137,49 @@ if (!dh) goto out; - if (!mdev) { - /* This is a tconn without a single volume. + if (!device) { + /* This is a connection without a single volume. * Suprisingly enough, it may have a network * configuration. 
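
get_one_status() above is a classic netlink dump callback: fill one skb with as many entries as fit, stash the position in cb->args[] (resource in args[0], next volume in args[1], single-resource filter in args[2]), and rely on being invoked again until nothing more is produced. The control shape, reduced to paging over an array (not DRBD code):

    #include <stddef.h>

    struct cursor { size_t next; };   /* stands in for cb->args[] */

    /* copy up to 'room' items starting at the saved cursor;
     * returns how many were produced -- 0 means "dump finished" */
    static size_t dump_some(const int *items, size_t n_items,
                            struct cursor *c, int *out, size_t room)
    {
        size_t produced = 0;

        while (c->next < n_items && produced < room)
            out[produced++] = items[c->next++];
        return produced;   /* caller re-invokes until this hits 0 */
    }
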
*/ - struct net_conf *nc; + struct drbd_connection *connection; + dh->minor = -1U; dh->ret_code = NO_ERROR; - if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED)) - goto cancel; - nc = rcu_dereference(tconn->net_conf); - if (nc && net_conf_to_skb(skb, nc, 1) != 0) + connection = the_only_connection(resource); + if (nla_put_drbd_cfg_context(skb, resource, connection, NULL)) goto cancel; + if (connection) { + struct net_conf *nc; + + nc = rcu_dereference(connection->net_conf); + if (nc && net_conf_to_skb(skb, nc, 1) != 0) + goto cancel; + } goto done; } - D_ASSERT(mdev->vnr == volume); - D_ASSERT(mdev->tconn == tconn); + D_ASSERT(device, device->vnr == volume); + D_ASSERT(device, device->resource == resource); - dh->minor = mdev_to_minor(mdev); + dh->minor = device_to_minor(device); dh->ret_code = NO_ERROR; - if (nla_put_status_info(skb, mdev, NULL)) { + if (nla_put_status_info(skb, device, NULL)) { cancel: genlmsg_cancel(skb, dh); goto out; } done: genlmsg_end(skb, dh); - } + } out: rcu_read_unlock(); /* where to start the next iteration */ - cb->args[0] = (long)pos; - cb->args[1] = (pos == tconn) ? volume + 1 : 0; + cb->args[0] = (long)pos; + cb->args[1] = (pos == resource) ? volume + 1 : 0; - /* No more tconns/volumes/minors found results in an empty skb. + /* No more resources/volumes/minors found results in an empty skb. * Which will terminate the dump. */ return skb->len; } @@ -2924,7 +3199,7 @@ const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ; struct nlattr *nla; const char *resource_name; - struct drbd_tconn *tconn; + struct drbd_resource *resource; int maxtype; /* Is this a followup call? */ @@ -2953,18 +3228,19 @@ if (!nla) return -EINVAL; resource_name = nla_data(nla); - tconn = conn_get_by_name(resource_name); - - if (!tconn) + if (!*resource_name) + return -ENODEV; + resource = drbd_find_resource(resource_name); + if (!resource) return -ENODEV; - kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */ + kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */ /* prime iterators, and set "filter" mode mark: - * only dump this tconn. */ - cb->args[0] = (long)tconn; + * only dump this connection. */ + cb->args[0] = (long)resource; /* cb->args[1] = 0; passed in this way. */ - cb->args[2] = (long)tconn; + cb->args[2] = (long)resource; dump: return get_one_status(skb, cb); @@ -2972,19 +3248,20 @@ int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; struct timeout_parms tp; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; tp.timeout_type = - adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED : - test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED : + adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED : + test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? 
UT_DEGRADED : UT_DEFAULT; err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp); @@ -2993,135 +3270,145 @@ return err; } out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info) { - struct drbd_conf *mdev; + struct drbd_config_context adm_ctx; + struct drbd_device *device; enum drbd_ret_code retcode; struct start_ov_parms parms; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; - mdev = adm_ctx.mdev; + device = adm_ctx.device; /* resume from last known position, if possible */ - parms.ov_start_sector = mdev->ov_start_sector; + parms.ov_start_sector = device->ov_start_sector; parms.ov_stop_sector = ULLONG_MAX; if (info->attrs[DRBD_NLA_START_OV_PARMS]) { int err = start_ov_parms_from_attrs(&parms, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto out; } } + mutex_lock(&adm_ctx.resource->adm_mutex); + /* w_make_ov_request expects position to be aligned */ - mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1); - mdev->ov_stop_sector = parms.ov_stop_sector; + device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1); + device->ov_stop_sector = parms.ov_stop_sector; /* If there is still bitmap IO pending, e.g. previous resync or verify * just being finished, wait for it before requesting a new resync. */ - drbd_suspend_io(mdev); - wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); - retcode = drbd_request_state(mdev,NS(conn,C_VERIFY_S)); - drbd_resume_io(mdev); + drbd_suspend_io(device); + wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags)); + retcode = drbd_request_state(device, NS(conn, C_VERIFY_S)); + drbd_resume_io(device); + + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info) { - struct drbd_conf *mdev; + struct drbd_config_context adm_ctx; + struct drbd_device *device; enum drbd_ret_code retcode; int skip_initial_sync = 0; int err; struct new_c_uuid_parms args; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out_nolock; - mdev = adm_ctx.mdev; + device = adm_ctx.device; memset(&args, 0, sizeof(args)); if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) { err = new_c_uuid_parms_from_attrs(&args, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto out_nolock; } } - mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */ + mutex_lock(&adm_ctx.resource->adm_mutex); + mutex_lock(device->state_mutex); /* Protects us against serialized state changes. 
*/ - if (!get_ldev(mdev)) { + if (!get_ldev(device)) { retcode = ERR_NO_DISK; goto out; } /* this is "skip initial sync", assume to be clean */ - if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 && - mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) { - dev_info(DEV, "Preparing to skip initial sync\n"); + if (device->state.conn == C_CONNECTED && + first_peer_device(device)->connection->agreed_pro_version >= 90 && + device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) { + drbd_info(device, "Preparing to skip initial sync\n"); skip_initial_sync = 1; - } else if (mdev->state.conn != C_STANDALONE) { + } else if (device->state.conn != C_STANDALONE) { retcode = ERR_CONNECTED; goto out_dec; } - drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */ - drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */ + drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */ + drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */ if (args.clear_bm) { - err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, + err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid", BM_LOCKED_MASK); if (err) { - dev_err(DEV, "Writing bitmap failed with %d\n",err); + drbd_err(device, "Writing bitmap failed with %d\n", err); retcode = ERR_IO_MD_DISK; } if (skip_initial_sync) { - drbd_send_uuids_skip_initial_sync(mdev); - _drbd_uuid_set(mdev, UI_BITMAP, 0); - drbd_print_uuids(mdev, "cleared bitmap UUID"); - spin_lock_irq(&mdev->tconn->req_lock); - _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), + drbd_send_uuids_skip_initial_sync(first_peer_device(device)); + _drbd_uuid_set(device, UI_BITMAP, 0); + drbd_print_uuids(device, "cleared bitmap UUID"); + spin_lock_irq(&device->resource->req_lock); + _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), CS_VERBOSE, NULL); - spin_unlock_irq(&mdev->tconn->req_lock); + spin_unlock_irq(&device->resource->req_lock); } } - drbd_md_sync(mdev); + drbd_md_sync(device); out_dec: - put_ldev(mdev); + put_ldev(device); out: - mutex_unlock(mdev->state_mutex); + mutex_unlock(device->state_mutex); + mutex_unlock(&adm_ctx.resource->adm_mutex); out_nolock: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } static enum drbd_ret_code -drbd_check_resource_name(const char *name) +drbd_check_resource_name(struct drbd_config_context *adm_ctx) { + const char *name = adm_ctx->resource_name; if (!name || !name[0]) { - drbd_msg_put_info("resource name missing"); + drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing"); return ERR_MANDATORY_TAG; } /* if we want to use these in sysfs/configfs/debugfs some day, * we must not allow slashes */ if (strchr(name, '/')) { - drbd_msg_put_info("invalid resource name"); + drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name"); return ERR_INVALID_REQUEST; } return NO_ERROR; @@ -3129,11 +3416,12 @@ int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; struct res_opts res_opts; int err; - retcode = drbd_adm_prepare(skb, info, 0); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -3143,207 +3431,211 @@ err = res_opts_from_attrs(&res_opts, info); if (err && err != -ENOMSG) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + 
drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto out; } - retcode = drbd_check_resource_name(adm_ctx.resource_name); + retcode = drbd_check_resource_name(&adm_ctx); if (retcode != NO_ERROR) goto out; - if (adm_ctx.tconn) { + if (adm_ctx.resource) { if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) { retcode = ERR_INVALID_REQUEST; - drbd_msg_put_info("resource exists"); + drbd_msg_put_info(adm_ctx.reply_skb, "resource exists"); } /* else: still NO_ERROR */ goto out; } + /* not yet safe for genl_family.parallel_ops */ if (!conn_create(adm_ctx.resource_name, &res_opts)) retcode = ERR_NOMEM; out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } -int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info) +int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_genlmsghdr *dh = info->userhdr; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; if (dh->minor > MINORMASK) { - drbd_msg_put_info("requested minor out of range"); + drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range"); retcode = ERR_INVALID_REQUEST; goto out; } if (adm_ctx.volume > DRBD_VOLUME_MAX) { - drbd_msg_put_info("requested volume id out of range"); + drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range"); retcode = ERR_INVALID_REQUEST; goto out; } /* drbd_adm_prepare made sure already - * that mdev->tconn and mdev->vnr match the request. */ - if (adm_ctx.mdev) { + * that first_peer_device(device)->connection and device->vnr match the request. */ + if (adm_ctx.device) { if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) - retcode = ERR_MINOR_EXISTS; + retcode = ERR_MINOR_OR_VOLUME_EXISTS; /* else: still NO_ERROR */ goto out; } - retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume); + mutex_lock(&adm_ctx.resource->adm_mutex); + retcode = drbd_create_device(&adm_ctx, dh->minor); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } -static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev) +static enum drbd_ret_code adm_del_minor(struct drbd_device *device) { - if (mdev->state.disk == D_DISKLESS && - /* no need to be mdev->state.conn == C_STANDALONE && + if (device->state.disk == D_DISKLESS && + /* no need to be device->state.conn == C_STANDALONE && * we may want to delete a minor from a live replication group. 
*/ - mdev->state.role == R_SECONDARY) { - _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS), + device->state.role == R_SECONDARY) { + _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE + CS_WAIT_COMPLETE); - idr_remove(&mdev->tconn->volumes, mdev->vnr); - idr_remove(&minors, mdev_to_minor(mdev)); - destroy_workqueue(mdev->submit.wq); - del_gendisk(mdev->vdisk); - synchronize_rcu(); - kref_put(&mdev->kref, &drbd_minor_destroy); + drbd_delete_device(device); return NO_ERROR; } else return ERR_MINOR_CONFIGURED; } -int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info) +int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; - retcode = adm_delete_minor(adm_ctx.mdev); + mutex_lock(&adm_ctx.resource->adm_mutex); + retcode = adm_del_minor(adm_ctx.device); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } +static int adm_del_resource(struct drbd_resource *resource) +{ + struct drbd_connection *connection; + + for_each_connection(connection, resource) { + if (connection->cstate > C_STANDALONE) + return ERR_NET_CONFIGURED; + } + if (!idr_is_empty(&resource->devices)) + return ERR_RES_IN_USE; + + list_del_rcu(&resource->resources); + /* Make sure all threads have actually stopped: state handling only + * does drbd_thread_stop_nowait(). */ + list_for_each_entry(connection, &resource->connections, connections) + drbd_thread_stop(&connection->worker); + synchronize_rcu(); + drbd_free_resource(resource); + return NO_ERROR; +} + int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; + struct drbd_resource *resource; + struct drbd_connection *connection; + struct drbd_device *device; int retcode; /* enum drbd_ret_code rsp. 
enum drbd_state_rv */ - struct drbd_conf *mdev; unsigned i; - retcode = drbd_adm_prepare(skb, info, 0); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) - goto out; - - if (!adm_ctx.tconn) { - retcode = ERR_RES_NOT_KNOWN; - goto out; - } + goto finish; + resource = adm_ctx.resource; + mutex_lock(&resource->adm_mutex); /* demote */ - idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) { - retcode = drbd_set_role(mdev, R_SECONDARY, 0); + for_each_connection(connection, resource) { + struct drbd_peer_device *peer_device; + + idr_for_each_entry(&connection->peer_devices, peer_device, i) { + retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0); + if (retcode < SS_SUCCESS) { + drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote"); + goto out; + } + } + + retcode = conn_try_disconnect(connection, 0); if (retcode < SS_SUCCESS) { - drbd_msg_put_info("failed to demote"); + drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect"); goto out; } } - retcode = conn_try_disconnect(adm_ctx.tconn, 0); - if (retcode < SS_SUCCESS) { - drbd_msg_put_info("failed to disconnect"); - goto out; - } - /* detach */ - idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) { - retcode = adm_detach(mdev, 0); + idr_for_each_entry(&resource->devices, device, i) { + retcode = adm_detach(device, 0); if (retcode < SS_SUCCESS || retcode > NO_ERROR) { - drbd_msg_put_info("failed to detach"); + drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach"); goto out; } } - /* If we reach this, all volumes (of this tconn) are Secondary, - * Disconnected, Diskless, aka Unconfigured. Make sure all threads have - * actually stopped, state handling only does drbd_thread_stop_nowait(). */ - drbd_thread_stop(&adm_ctx.tconn->worker); - - /* Now, nothing can fail anymore */ - /* delete volumes */ - idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) { - retcode = adm_delete_minor(mdev); + idr_for_each_entry(&resource->devices, device, i) { + retcode = adm_del_minor(device); if (retcode != NO_ERROR) { /* "can not happen" */ - drbd_msg_put_info("failed to delete volume"); + drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume"); goto out; } } - /* delete connection */ - if (conn_lowest_minor(adm_ctx.tconn) < 0) { - list_del_rcu(&adm_ctx.tconn->all_tconn); - synchronize_rcu(); - kref_put(&adm_ctx.tconn->kref, &conn_destroy); - - retcode = NO_ERROR; - } else { - /* "can not happen" */ - retcode = ERR_RES_IN_USE; - drbd_msg_put_info("failed to delete connection"); - } - goto out; + retcode = adm_del_resource(resource); out: - drbd_adm_finish(info, retcode); + mutex_unlock(&resource->adm_mutex); +finish: + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; + struct drbd_resource *resource; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) - goto out; - - if (conn_lowest_minor(adm_ctx.tconn) < 0) { - list_del_rcu(&adm_ctx.tconn->all_tconn); - synchronize_rcu(); - kref_put(&adm_ctx.tconn->kref, &conn_destroy); - - retcode = NO_ERROR; - } else { - retcode = ERR_RES_IN_USE; - } + goto finish; + resource = adm_ctx.resource; - if (retcode == NO_ERROR) - drbd_thread_stop(&adm_ctx.tconn->worker); -out: - drbd_adm_finish(info, retcode); + 
mutex_lock(&resource->adm_mutex); + retcode = adm_del_resource(resource); + mutex_unlock(&resource->adm_mutex); +finish: + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } -void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib) +void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib) { static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */ struct sk_buff *msg; @@ -3351,13 +3643,6 @@ unsigned seq; int err = -ENOMEM; - if (sib->sib_reason == SIB_SYNC_PROGRESS) { - if (time_after(jiffies, mdev->rs_last_bcast + HZ)) - mdev->rs_last_bcast = jiffies; - else - return; - } - seq = atomic_inc_return(&drbd_genl_seq); msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO); if (!msg) @@ -3367,10 +3652,10 @@ d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT); if (!d_out) /* cannot happen, but anyways. */ goto nla_put_failure; - d_out->minor = mdev_to_minor(mdev); + d_out->minor = device_to_minor(device); d_out->ret_code = NO_ERROR; - if (nla_put_status_info(msg, mdev, sib)) + if (nla_put_status_info(msg, device, sib)) goto nla_put_failure; genlmsg_end(msg, d_out); err = drbd_genl_multicast_events(msg, 0); @@ -3383,7 +3668,7 @@ nla_put_failure: nlmsg_free(msg); failed: - dev_err(DEV, "Error %d while broadcasting event. " + drbd_err(device, "Error %d while broadcasting event. " "Event seq:%u sib_reason:%u\n", err, seq, sib->sib_reason); }
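
A note for reviewers unfamiliar with netlink dump callbacks: get_one_status() is re-entered once per reply skb and keeps its position across calls in cb->args[0..2] (resource position, next volume, single-resource filter). The self-contained user-space sketch below models that resumable two-level iteration under simplified assumptions; every name in it is hypothetical, it is not kernel code, and it omits the RCU revalidation and the volume-less-connection special case the real function handles.

/*
 * Minimal model of a resumable two-level dump: an outer list of
 * resources, an inner table of volumes, and a cursor that survives
 * between callback invocations (the kernel keeps it in cb->args[]).
 */
#include <stddef.h>
#include <stdio.h>

struct volume   { unsigned vnr; };
struct resource { const char *name; struct volume vols[3]; size_t nvols; };

/* Stands in for cb->args[0..2]: position, next volume, filter flag. */
struct dump_cursor { size_t pos; size_t volume; int single_resource; };

static struct resource resources[] = {
	{ "r0", { { 0 }, { 1 } }, 2 },
	{ "r1", { { 0 } },        1 },
};

/* Emit at most one entry per call; return 0 once the dump is done.
 * Like the kernel version, the position is re-examined on every call,
 * so entries added or removed mid-dump may be missed or skipped. */
static int dump_one(struct dump_cursor *c)
{
	while (c->pos < sizeof(resources) / sizeof(resources[0])) {
		struct resource *res = &resources[c->pos];

		if (c->volume < res->nvols) {
			printf("%s/volume %u\n", res->name,
			       res->vols[c->volume].vnr);
			c->volume++;
			return 1;
		}
		/* No more volumes: advance the resource iterator,
		 * unless only a single resource was requested. */
		if (c->single_resource)
			return 0;
		c->pos++;
		c->volume = 0;
	}
	return 0;	/* an empty result terminates the dump */
}

int main(void)
{
	struct dump_cursor c = { 0, 0, 0 };

	while (dump_one(&c))
		;	/* each iteration models one netlink callback */
	return 0;
}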
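
The conversion from one static global adm_ctx to an on-stack drbd_config_context gives every handler the same bracket: prepare, bail out if no reply skb could be built, do the work under the resource's adm_mutex, finish. A minimal user-space analogue of that skeleton follows, with a pthread mutex standing in for adm_mutex; all names are hypothetical stand-ins, not the kernel API.

/*
 * Skeleton shared by the reworked admin handlers, modelled in plain C.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

enum ret_code { NO_ERROR = 0, ERR_SOMETHING = 1 };

struct resource { pthread_mutex_t adm_mutex; };

/* On-stack per-request context, standing in for drbd_config_context. */
struct config_context {
	struct resource *resource;
	int have_reply;			/* models adm_ctx.reply_skb != NULL */
};

static enum ret_code adm_prepare(struct config_context *ctx, struct resource *r)
{
	ctx->resource = r;
	ctx->have_reply = 1;		/* pretend the reply skb was allocated */
	return NO_ERROR;
}

static void adm_finish(struct config_context *ctx, enum ret_code rc)
{
	(void)ctx;
	printf("reply: ret_code=%d\n", rc);
}

static enum ret_code do_admin_work(struct resource *r)
{
	(void)r;
	return NO_ERROR;		/* the actual admin operation */
}

static int adm_handler(struct resource *r)
{
	struct config_context ctx;	/* per request, no global state */
	enum ret_code rc;

	rc = adm_prepare(&ctx, r);
	if (!ctx.have_reply)
		return rc;		/* could not even build a reply */
	if (rc != NO_ERROR)
		goto finish;

	pthread_mutex_lock(&r->adm_mutex);
	rc = do_admin_work(r);
	pthread_mutex_unlock(&r->adm_mutex);
finish:
	adm_finish(&ctx, rc);
	return 0;
}

int main(void)
{
	struct resource r = { PTHREAD_MUTEX_INITIALIZER };

	return adm_handler(&r);
}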
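
drbd_adm_down() now performs the whole teardown in a fixed order and aborts on the first failure. The sketch below shows that order in simplified form (the real code demotes per peer device inside each connection loop; every stub here is hypothetical). Each step establishes the precondition of the next: adm_del_minor() only accepts diskless Secondary devices, and adm_del_resource() insists on standalone connections and an empty device table.

enum { NO_ERROR = 0, SS_SUCCESS = 1 };	/* hypothetical stand-ins */

struct device     { int minor; };
struct connection { int cstate; };

static int demote(struct device *d)         { (void)d; return SS_SUCCESS; }
static int disconnect(struct connection *c) { (void)c; return SS_SUCCESS; }
static int detach(struct device *d)         { (void)d; return NO_ERROR; }
static int del_minor(struct device *d)      { (void)d; return NO_ERROR; }
static int del_resource(void)               { return NO_ERROR; }

static int down_resource(struct device *devs, int ndev,
			 struct connection *conns, int nconn)
{
	int i;

	/* 1. demote: every device must become Secondary */
	for (i = 0; i < ndev; i++)
		if (demote(&devs[i]) < SS_SUCCESS)
			return -1;
	/* 2. disconnect every connection of the resource */
	for (i = 0; i < nconn; i++)
		if (disconnect(&conns[i]) < SS_SUCCESS)
			return -1;
	/* 3. detach the backing devices */
	for (i = 0; i < ndev; i++)
		if (detach(&devs[i]) != NO_ERROR)
			return -1;
	/* 4. delete the now diskless, Secondary minors */
	for (i = 0; i < ndev; i++)
		if (del_minor(&devs[i]) != NO_ERROR)
			return -1;
	/* 5. finally drop the resource itself */
	return del_resource();
}

int main(void)
{
	struct device d[2] = { { 0 }, { 1 } };
	struct connection c[1] = { { 0 } };

	return down_resource(d, 2, c, 1) ? 1 : 0;
}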