Lines matching refs:dlm in fs/ocfs2/dlm/dlmdomain.c (OCFS2 o2dlm domain lifecycle code). Each entry below is the source line number followed by the matching line.
143 static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
145 void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
150 mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len,
156 void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
160 assert_spin_locked(&dlm->spinlock);
162 bucket = dlm_lockres_hash(dlm, res->lockname.hash);
169 mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len,
173 struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
183 assert_spin_locked(&dlm->spinlock);
185 bucket = dlm_lockres_hash(dlm, hash);
206 struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
215 assert_spin_locked(&dlm->spinlock);
217 res = __dlm_lookup_lockres_full(dlm, name, len, hash);
231 struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
238 spin_lock(&dlm->spinlock);
239 res = __dlm_lookup_lockres(dlm, name, len, hash);
240 spin_unlock(&dlm->spinlock);
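The fragments above trace the lockres hash path: insert and lookup both assert dlm->spinlock, pick a bucket from the name hash, walk that bucket, and the lookup takes a reference before returning. Below is a minimal userspace sketch of that pattern; the pthread mutex, the djb2-style hash, and every struct and function name are my own stand-ins, not the kernel's.

    #include <pthread.h>
    #include <stddef.h>
    #include <string.h>

    #define NUM_BUCKETS 64

    struct lockres {
        struct lockres *next;              /* bucket chain */
        unsigned int hash;
        size_t namelen;
        char name[64];
        int refs;
    };

    struct domain {
        pthread_mutex_t lock;              /* stands in for dlm->spinlock */
        struct lockres *buckets[NUM_BUCKETS];
    };

    static unsigned int name_hash(const char *name, size_t len)
    {
        unsigned int h = 5381;             /* djb2; the kernel uses its own hash */
        while (len--)
            h = h * 33 + (unsigned char)*name++;
        return h;
    }

    /* Caller must hold dom->lock, mirroring the assert_spin_locked() calls. */
    static struct lockres *lookup_locked(struct domain *dom, const char *name,
                                         size_t len, unsigned int hash)
    {
        struct lockres *res;

        for (res = dom->buckets[hash % NUM_BUCKETS]; res; res = res->next) {
            if (res->hash == hash && res->namelen == len &&
                !memcmp(res->name, name, len)) {
                res->refs++;               /* reference taken under the lock */
                return res;
            }
        }
        return NULL;
    }

    static struct lockres *lookup(struct domain *dom, const char *name, size_t len)
    {
        unsigned int hash = name_hash(name, len);
        struct lockres *res;

        pthread_mutex_lock(&dom->lock);
        res = lookup_locked(dom, name, len, hash);
        pthread_mutex_unlock(&dom->lock);
        return res;
    }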
290 static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
292 dlm_destroy_debugfs_subroot(dlm);
294 if (dlm->lockres_hash)
295 dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
297 if (dlm->master_hash)
298 dlm_free_pagevec((void **)dlm->master_hash, DLM_HASH_PAGES);
300 kfree(dlm->name);
301 kfree(dlm);
309 struct dlm_ctxt *dlm;
311 dlm = container_of(kref, struct dlm_ctxt, dlm_refs);
313 BUG_ON(dlm->num_joins);
314 BUG_ON(dlm->dlm_state == DLM_CTXT_JOINED);
317 list_del_init(&dlm->list);
321 mlog(0, "freeing memory from domain %s\n", dlm->name);
325 dlm_free_ctxt_mem(dlm);
330 void dlm_put(struct dlm_ctxt *dlm)
333 kref_put(&dlm->dlm_refs, dlm_ctxt_release);
337 static void __dlm_get(struct dlm_ctxt *dlm)
339 kref_get(&dlm->dlm_refs);
342 /* given a questionable reference to a dlm object, gets a reference if
345 struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm)
353 if (target == dlm) {
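dlm_ctxt_release(), dlm_put(), __dlm_get(), and dlm_grab() above are the usual kref lifetime dance: the last put unlinks the context and frees it, and dlm_grab() only succeeds while the context is still on the global domain list (that list check is omitted below). A rough userspace equivalent using C11 atomics, with invented names rather than the kernel's kref API:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ctxt {
        atomic_int refs;
        char *name;
    };

    static void ctxt_release(struct ctxt *c)
    {
        /* Corresponds to dlm_ctxt_release(): sanity checks, unlink, free. */
        printf("freeing memory from domain %s\n", c->name);
        free(c->name);
        free(c);
    }

    static void ctxt_get(struct ctxt *c)
    {
        atomic_fetch_add(&c->refs, 1);          /* like kref_get() */
    }

    static void ctxt_put(struct ctxt *c)
    {
        if (atomic_fetch_sub(&c->refs, 1) == 1)  /* like kref_put() */
            ctxt_release(c);
    }

    int main(void)
    {
        struct ctxt *c = calloc(1, sizeof(*c));

        if (!c)
            return 1;
        c->name = strdup("example");
        atomic_init(&c->refs, 1);                /* like kref_init() */

        ctxt_get(c);   /* an extra user takes a reference... */
        ctxt_put(c);   /* ...and drops it */
        ctxt_put(c);   /* last put triggers ctxt_release() */
        return 0;
    }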
365 int dlm_domain_fully_joined(struct dlm_ctxt *dlm)
370 ret = (dlm->dlm_state == DLM_CTXT_JOINED) ||
371 (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN);
377 static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm)
379 if (dlm->dlm_worker) {
380 destroy_workqueue(dlm->dlm_worker);
381 dlm->dlm_worker = NULL;
385 static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
387 dlm_unregister_domain_handlers(dlm);
388 dlm_complete_thread(dlm);
389 dlm_complete_recovery_thread(dlm);
390 dlm_destroy_dlm_worker(dlm);
396 list_del_init(&dlm->list);
403 static int dlm_migrate_all_locks(struct dlm_ctxt *dlm)
411 mlog(0, "Migrating locks from domain %s\n", dlm->name);
414 spin_lock(&dlm->spinlock);
418 bucket = dlm_lockres_hash(dlm, i);
425 /* migrate, if necessary. this will drop the dlm
427 dropped = dlm_empty_lockres(dlm, res);
431 __dlm_lockres_calc_usage(dlm, res);
439 cond_resched_lock(&dlm->spinlock);
443 cond_resched_lock(&dlm->spinlock);
448 if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
450 "need to be migrated after dlm recovery\n", dlm->name);
453 mlog(0, "%s: we won't do dlm recovery after migrating "
454 "all lock resources\n", dlm->name);
455 dlm->migrate_done = 1;
459 spin_unlock(&dlm->spinlock);
460 wake_up(&dlm->dlm_thread_wq);
462 /* let the dlm thread take care of purging, keep scanning until
466 dlm->name, num);
469 mlog(0, "DONE Migrating locks from domain %s\n", dlm->name);
473 static int dlm_no_joining_node(struct dlm_ctxt *dlm)
477 spin_lock(&dlm->spinlock);
478 ret = dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN;
479 spin_unlock(&dlm->spinlock);
487 struct dlm_ctxt *dlm = data;
491 if (!dlm_grab(dlm))
495 mlog(0, "%s: Node %u sent a begin exit domain message\n", dlm->name, node);
497 spin_lock(&dlm->spinlock);
498 set_bit(node, dlm->exit_domain_map);
499 spin_unlock(&dlm->spinlock);
501 dlm_put(dlm);
506 static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm)
508 /* Yikes, a double spinlock! I need domain_lock for the dlm
509 * state and the dlm spinlock for join state... Sorry! */
512 spin_lock(&dlm->spinlock);
514 if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
516 dlm->joining_node);
517 spin_unlock(&dlm->spinlock);
520 wait_event(dlm->dlm_join_events, dlm_no_joining_node(dlm));
524 dlm->dlm_state = DLM_CTXT_LEAVING;
525 spin_unlock(&dlm->spinlock);
529 static void __dlm_print_nodes(struct dlm_ctxt *dlm)
533 assert_spin_locked(&dlm->spinlock);
536 while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
547 struct dlm_ctxt *dlm = data;
553 if (!dlm_grab(dlm))
558 spin_lock(&dlm->spinlock);
559 clear_bit(node, dlm->domain_map);
560 clear_bit(node, dlm->exit_domain_map);
561 printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name);
562 __dlm_print_nodes(dlm);
565 dlm_hb_event_notify_attached(dlm, node, 0);
567 spin_unlock(&dlm->spinlock);
569 dlm_put(dlm);
574 static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm, u32 msg_type,
580 mlog(0, "%s: Sending domain exit message %u to node %u\n", dlm->name,
584 leave_msg.node_idx = dlm->node_num;
586 status = o2net_send_message(msg_type, dlm->key, &leave_msg,
591 dlm->name);
596 static void dlm_begin_exit_domain(struct dlm_ctxt *dlm)
601 if (dlm->dlm_locking_proto.pv_major == 1 &&
602 dlm->dlm_locking_proto.pv_minor < 2)
610 spin_lock(&dlm->spinlock);
612 node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, node + 1);
615 if (node == dlm->node_num)
618 spin_unlock(&dlm->spinlock);
619 dlm_send_one_domain_exit(dlm, DLM_BEGIN_EXIT_DOMAIN_MSG, node);
620 spin_lock(&dlm->spinlock);
622 spin_unlock(&dlm->spinlock);
625 static void dlm_leave_domain(struct dlm_ctxt *dlm)
630 * accept mastership of new ones. The dlm is responsible for
634 spin_lock(&dlm->spinlock);
636 clear_bit(dlm->node_num, dlm->domain_map);
637 while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
639 /* Drop the dlm spinlock. This is safe wrt the domain_map.
645 spin_unlock(&dlm->spinlock);
649 status = dlm_send_one_domain_exit(dlm, DLM_EXIT_DOMAIN_MSG,
664 spin_lock(&dlm->spinlock);
668 clear_bit(node, dlm->domain_map);
670 spin_unlock(&dlm->spinlock);
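dlm_begin_exit_domain() and dlm_leave_domain() above share one idiom: walk the domain_map bitmap with find_next_bit(), skip our own node number, and drop dlm->spinlock around each blocking o2net send before retaking it to find the next node. A compact sketch of that loop over a 64-bit map, with invented helper names (O2NM_MAX_NODES and o2net are not modeled):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    struct domain {
        pthread_mutex_t lock;
        uint64_t domain_map;     /* bit i set => node i is in the domain */
        int node_num;            /* our own node number */
    };

    static int find_next_bit64(uint64_t map, int start)
    {
        for (int i = start; i < 64; i++)
            if (map & ((uint64_t)1 << i))
                return i;
        return 64;               /* "no more bits", like find_next_bit() */
    }

    static void send_exit_message(int node)
    {
        printf("sending exit-domain message to node %d\n", node);
    }

    static void notify_exit(struct domain *dom)
    {
        int node = -1;

        pthread_mutex_lock(&dom->lock);
        while ((node = find_next_bit64(dom->domain_map, node + 1)) < 64) {
            if (node == dom->node_num)
                continue;        /* don't message ourselves */

            pthread_mutex_unlock(&dom->lock);
            send_exit_message(node);   /* may block; lock must not be held */
            pthread_mutex_lock(&dom->lock);
        }
        pthread_mutex_unlock(&dom->lock);
    }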
673 void dlm_unregister_domain(struct dlm_ctxt *dlm)
679 BUG_ON(dlm->dlm_state != DLM_CTXT_JOINED);
680 BUG_ON(!dlm->num_joins);
682 dlm->num_joins--;
683 if (!dlm->num_joins) {
690 dlm->dlm_state = DLM_CTXT_IN_SHUTDOWN;
696 mlog(0, "shutting down domain %s\n", dlm->name);
697 dlm_begin_exit_domain(dlm);
699 /* We changed dlm state, notify the thread */
700 dlm_kick_thread(dlm, NULL);
702 while (dlm_migrate_all_locks(dlm)) {
705 mlog(0, "%s: more migration to do\n", dlm->name);
709 if (!list_empty(&dlm->tracking_list)) {
712 list_for_each_entry(res, &dlm->tracking_list, tracking)
716 dlm_mark_domain_leaving(dlm);
717 dlm_leave_domain(dlm);
718 printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name);
719 dlm_force_free_mles(dlm);
720 dlm_complete_dlm_shutdown(dlm);
722 dlm_put(dlm);
796 struct dlm_ctxt *dlm = NULL;
821 dlm = __dlm_lookup_domain_full(query->domain, query->name_len);
822 if (!dlm)
832 if (test_bit(nodenum, dlm->domain_map)) {
844 /* Once the dlm ctxt is marked as leaving then we don't want
848 if (dlm->dlm_state != DLM_CTXT_LEAVING) {
850 spin_lock(&dlm->spinlock);
852 if (dlm->dlm_state == DLM_CTXT_NEW &&
853 dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN) {
858 } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
861 } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
865 } else if (test_bit(bit, dlm->recovery_map)) {
869 } else if (test_bit(bit, dlm->domain_map)) {
882 &dlm->dlm_locking_proto,
886 &dlm->fs_locking_proto,
893 __dlm_set_joining_node(dlm, query->node_idx);
897 spin_unlock(&dlm->spinlock);
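The dlm_query_join_handler() fragments above make a state-driven decision: the response depends on whether we have joined ourselves, whether another node is mid-join, whether recovery is active, and whether the querying node still appears in the recovery or domain maps. The sketch below condenses that decision into one function; the response names echo the kernel's codes as I recall them, but the struct and helper are my own condensation, and the real handler also runs a protocol compatibility check before answering OK.

    #include <stdbool.h>

    enum join_response { JOIN_OK, JOIN_OK_NO_MAP, JOIN_DISALLOW };

    struct domain_state {
        bool is_new;              /* DLM_CTXT_NEW and nobody joining yet */
        bool someone_joining;     /* joining_node != UNKNOWN */
        bool recovery_active;     /* reco.state & DLM_RECO_STATE_ACTIVE */
        bool node_in_recovery;    /* querying node's bit set in recovery_map */
        bool node_in_domain;      /* querying node's bit set in domain_map */
    };

    static enum join_response answer_join_query(const struct domain_state *s)
    {
        if (s->is_new)
            return JOIN_OK_NO_MAP;     /* we haven't joined ourselves yet */
        if (s->someone_joining)
            return JOIN_DISALLOW;      /* one join at a time */
        if (s->recovery_active)
            return JOIN_DISALLOW;      /* don't admit nodes mid-recovery */
        if (s->node_in_recovery)
            return JOIN_DISALLOW;      /* still cleaning up after this node */
        if (s->node_in_domain)
            return JOIN_DISALLOW;      /* it already looks like a member */
        return JOIN_OK;
    }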
913 struct dlm_ctxt *dlm = NULL;
921 dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len);
922 /* XXX should we consider no dlm ctxt an error? */
923 if (dlm) {
924 spin_lock(&dlm->spinlock);
929 BUG_ON(dlm->joining_node != assert->node_idx);
931 if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
932 mlog(0, "dlm recovery is ongoing, disallow join\n");
933 spin_unlock(&dlm->spinlock);
938 set_bit(assert->node_idx, dlm->domain_map);
939 clear_bit(assert->node_idx, dlm->exit_domain_map);
940 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
943 assert->node_idx, dlm->name);
944 __dlm_print_nodes(dlm);
947 dlm_hb_event_notify_attached(dlm, assert->node_idx, 1);
949 spin_unlock(&dlm->spinlock);
956 static int dlm_match_regions(struct dlm_ctxt *dlm,
969 qr->qr_domain, qr->qr_node, dlm->node_num);
978 qr->qr_domain, dlm->node_num, qr->qr_node);
1009 dlm->node_num, qr->qr_node);
1032 qr->qr_node, dlm->node_num);
1042 static int dlm_send_regions(struct dlm_ctxt *dlm, unsigned long *node_map)
1058 qr->qr_node = dlm->node_num;
1059 qr->qr_namelen = strlen(dlm->name);
1060 memcpy(qr->qr_domain, dlm->name, qr->qr_namelen);
1073 if (i == dlm->node_num)
1099 struct dlm_ctxt *dlm = NULL;
1116 dlm = __dlm_lookup_domain_full(qr->qr_domain, qr->qr_namelen);
1117 if (!dlm) {
1123 spin_lock(&dlm->spinlock);
1124 if (dlm->joining_node != qr->qr_node) {
1127 dlm->joining_node);
1132 if (dlm->dlm_locking_proto.pv_major == 1 &&
1133 dlm->dlm_locking_proto.pv_minor == 0) {
1135 "but active dlm protocol is %d.%d\n", qr->qr_node,
1136 qr->qr_domain, dlm->dlm_locking_proto.pv_major,
1137 dlm->dlm_locking_proto.pv_minor);
1141 status = dlm_match_regions(dlm, qr, local, sizeof(qr->qr_regions));
1144 spin_unlock(&dlm->spinlock);
1154 static int dlm_match_nodes(struct dlm_ctxt *dlm, struct dlm_query_nodeinfo *qn)
1196 qn->qn_nodenum, dlm->node_num);
1203 dlm->node_num, qn->qn_nodenum);
1214 static int dlm_send_nodeinfo(struct dlm_ctxt *dlm, unsigned long *node_map)
1243 qn->qn_nodenum = dlm->node_num;
1245 qn->qn_namelen = strlen(dlm->name);
1246 memcpy(qn->qn_domain, dlm->name, qn->qn_namelen);
1251 if (i == dlm->node_num)
1276 struct dlm_ctxt *dlm = NULL;
1285 dlm = __dlm_lookup_domain_full(qn->qn_domain, qn->qn_namelen);
1286 if (!dlm) {
1292 spin_lock(&dlm->spinlock);
1293 if (dlm->joining_node != qn->qn_nodenum) {
1296 dlm->joining_node);
1301 if (dlm->dlm_locking_proto.pv_major == 1 &&
1302 dlm->dlm_locking_proto.pv_minor == 0) {
1304 "but active dlm protocol is %d.%d\n", qn->qn_nodenum,
1305 qn->qn_domain, dlm->dlm_locking_proto.pv_major,
1306 dlm->dlm_locking_proto.pv_minor);
1310 status = dlm_match_nodes(dlm, qn);
1313 spin_unlock(&dlm->spinlock);
1324 struct dlm_ctxt *dlm = NULL;
1332 dlm = __dlm_lookup_domain_full(cancel->domain, cancel->name_len);
1334 if (dlm) {
1335 spin_lock(&dlm->spinlock);
1339 BUG_ON(dlm->joining_node != cancel->node_idx);
1340 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
1342 spin_unlock(&dlm->spinlock);
1349 static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm,
1356 cancel_msg.node_idx = dlm->node_num;
1357 cancel_msg.name_len = strlen(dlm->name);
1358 memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len);
1375 static int dlm_send_join_cancels(struct dlm_ctxt *dlm,
1394 if (node == dlm->node_num)
1397 tmpstat = dlm_send_one_join_cancel(dlm, node);
1411 static int dlm_request_join(struct dlm_ctxt *dlm,
1423 join_msg.node_idx = dlm->node_num;
1424 join_msg.name_len = strlen(dlm->name);
1425 memcpy(join_msg.domain, dlm->name, join_msg.name_len);
1426 join_msg.dlm_proto = dlm->dlm_locking_proto;
1427 join_msg.fs_proto = dlm->fs_locking_proto;
1430 byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);
1444 his dlm isn't up, so we can consider him a 'yes' but not
1461 dlm->dlm_locking_proto.pv_major,
1462 dlm->dlm_locking_proto.pv_minor,
1463 dlm->fs_locking_proto.pv_major,
1464 dlm->fs_locking_proto.pv_minor,
1470 dlm->dlm_locking_proto.pv_minor = packet.dlm_minor;
1471 dlm->fs_locking_proto.pv_minor = packet.fs_minor;
1476 dlm->dlm_locking_proto.pv_major,
1477 dlm->dlm_locking_proto.pv_minor,
1478 dlm->fs_locking_proto.pv_major,
1479 dlm->fs_locking_proto.pv_minor);
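dlm_request_join() above sends our dlm and fs locking protocol versions and, on a YES response, adopts the minor versions the peer returned (lines 1470-1471). As I read it, the majors must match exactly and the minor is negotiated down to the lower of the two; a tiny illustrative sketch of that rule, not a spec:

    #include <stdint.h>
    #include <stdio.h>

    struct proto { uint8_t major, minor; };

    /* Returns 0 and lowers request->minor to the common level on success,
     * nonzero if the majors are incompatible. */
    static int proto_negotiate(const struct proto *ours, struct proto *request)
    {
        if (request->major != ours->major)
            return -1;
        if (request->minor > ours->minor)
            request->minor = ours->minor;
        return 0;
    }

    int main(void)
    {
        struct proto ours = { 1, 2 };
        struct proto theirs = { 1, 4 };

        if (proto_negotiate(&ours, &theirs) == 0)
            printf("agreed on %u.%u\n", theirs.major, theirs.minor);
        return 0;
    }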
1498 static int dlm_send_one_join_assert(struct dlm_ctxt *dlm,
1508 assert_msg.node_idx = dlm->node_num;
1509 assert_msg.name_len = strlen(dlm->name);
1510 memcpy(assert_msg.domain, dlm->name, assert_msg.name_len);
1525 static void dlm_send_join_asserts(struct dlm_ctxt *dlm,
1533 if (node == dlm->node_num)
1540 status = dlm_send_one_join_assert(dlm, node);
1542 spin_lock(&dlm->spinlock);
1543 live = test_bit(node, dlm->live_nodes_map);
1544 spin_unlock(&dlm->spinlock);
1563 static int dlm_should_restart_join(struct dlm_ctxt *dlm,
1574 spin_lock(&dlm->spinlock);
1577 ret = !bitmap_equal(ctxt->live_map, dlm->live_nodes_map,
1579 spin_unlock(&dlm->spinlock);
1587 static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
1593 mlog(0, "%p", dlm);
1605 o2hb_fill_node_map(dlm->live_nodes_map, O2NM_MAX_NODES);
1607 spin_lock(&dlm->spinlock);
1608 bitmap_copy(ctxt->live_map, dlm->live_nodes_map, O2NM_MAX_NODES);
1609 __dlm_set_joining_node(dlm, dlm->node_num);
1610 spin_unlock(&dlm->spinlock);
1615 if (node == dlm->node_num)
1618 status = dlm_request_join(dlm, node, &response);
1625 * dlm up. */
1629 if (dlm_should_restart_join(dlm, ctxt, response)) {
1641 spin_lock(&dlm->spinlock);
1642 bitmap_copy(dlm->domain_map, ctxt->yes_resp_map, O2NM_MAX_NODES);
1643 set_bit(dlm->node_num, dlm->domain_map);
1644 spin_unlock(&dlm->spinlock);
1647 if (dlm->dlm_locking_proto.pv_major > 1 ||
1648 dlm->dlm_locking_proto.pv_minor > 0) {
1649 status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map);
1654 status = dlm_send_regions(dlm, ctxt->yes_resp_map);
1661 dlm_send_join_asserts(dlm, ctxt->yes_resp_map);
1668 dlm->dlm_state = DLM_CTXT_JOINED;
1669 dlm->num_joins++;
1673 spin_lock(&dlm->spinlock);
1674 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
1676 printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name);
1677 __dlm_print_nodes(dlm);
1679 spin_unlock(&dlm->spinlock);
1684 tmpstat = dlm_send_join_cancels(dlm,
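dlm_try_to_join_domain() above snapshots the live-node map, asks every live node except itself to admit it, and dlm_should_restart_join() forces a full retry if the live map changed underneath or a peer asked it to back off (cancels are sent on the bail path). A loose userspace sketch of that retry shape, with all the helpers stubbed and hypothetical:

    #include <stdbool.h>
    #include <stdint.h>

    enum reply { REPLY_YES, REPLY_RETRY };

    /* Hypothetical stand-ins for o2hb_fill_node_map() and the network
     * round-trip done by dlm_request_join(). */
    static uint64_t snapshot_live_nodes(void) { return 0x5; }
    static enum reply request_join(int node)  { (void)node; return REPLY_YES; }

    /* Returns true once a whole pass succeeded against a stable view of
     * the live nodes; the caller backs off briefly and calls again on false. */
    static bool try_to_join(int my_node)
    {
        uint64_t before = snapshot_live_nodes();

        for (int node = 0; node < 64; node++) {
            if (!(before & ((uint64_t)1 << node)) || node == my_node)
                continue;
            if (request_join(node) == REPLY_RETRY)
                return false;            /* peer is busy: restart later */
        }

        /* If membership changed while we were asking, the answers we
         * collected may be stale: restart the whole pass. */
        return snapshot_live_nodes() == before;
    }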
1697 static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm)
1699 o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_up);
1700 o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_down);
1701 o2net_unregister_handler_list(&dlm->dlm_domain_handlers);
1704 static int dlm_register_domain_handlers(struct dlm_ctxt *dlm)
1710 o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
1711 dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
1712 o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
1713 dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
1715 status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_down);
1719 status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_up);
1723 status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key,
1726 dlm, NULL, &dlm->dlm_domain_handlers);
1730 status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key,
1733 dlm, dlm_assert_master_post_handler,
1734 &dlm->dlm_domain_handlers);
1738 status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key,
1741 dlm, NULL, &dlm->dlm_domain_handlers);
1745 status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key,
1748 dlm, NULL, &dlm->dlm_domain_handlers);
1752 status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key,
1755 dlm, NULL, &dlm->dlm_domain_handlers);
1759 status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key,
1762 dlm, NULL, &dlm->dlm_domain_handlers);
1766 status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key,
1769 dlm, NULL, &dlm->dlm_domain_handlers);
1773 status = o2net_register_handler(DLM_DEREF_LOCKRES_MSG, dlm->key,
1776 dlm, NULL, &dlm->dlm_domain_handlers);
1780 status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key,
1783 dlm, NULL, &dlm->dlm_domain_handlers);
1787 status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key,
1790 dlm, NULL, &dlm->dlm_domain_handlers);
1794 status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key,
1797 dlm, NULL, &dlm->dlm_domain_handlers);
1801 status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key,
1804 dlm, NULL, &dlm->dlm_domain_handlers);
1808 status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key,
1811 dlm, NULL, &dlm->dlm_domain_handlers);
1815 status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key,
1818 dlm, NULL, &dlm->dlm_domain_handlers);
1822 status = o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key,
1825 dlm, NULL, &dlm->dlm_domain_handlers);
1829 status = o2net_register_handler(DLM_BEGIN_EXIT_DOMAIN_MSG, dlm->key,
1832 dlm, NULL, &dlm->dlm_domain_handlers);
1836 status = o2net_register_handler(DLM_DEREF_LOCKRES_DONE, dlm->key,
1839 dlm, NULL, &dlm->dlm_domain_handlers);
1842 dlm_unregister_domain_handlers(dlm);
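dlm_register_domain_handlers() above registers a long series of heartbeat callbacks and o2net message handlers, and any failure falls through to a single bail path that unregisters everything at once (line 1842). The table-driven sketch below shows the same register-or-unwind shape, with invented stubs in place of o2net_register_handler() and its list-based teardown:

    #include <stdio.h>

    typedef int (*handler_fn)(void *data);

    struct handler_desc {
        int msg_type;
        handler_fn fn;
    };

    static int net_register(const struct handler_desc *h)
    {
        printf("register msg %d\n", h->msg_type);
        return 0;                       /* 0 on success, like the o2net calls */
    }

    static void net_unregister(const struct handler_desc *h)
    {
        printf("unregister msg %d\n", h->msg_type);
    }

    static int register_all(const struct handler_desc *table, int count)
    {
        int i, status = 0;

        for (i = 0; i < count; i++) {
            status = net_register(&table[i]);
            if (status)
                goto bail;
        }
        return 0;

    bail:
        /* Unwind: drop everything that did register before the failure. */
        while (i-- > 0)
            net_unregister(&table[i]);
        return status;
    }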
1847 static int dlm_join_domain(struct dlm_ctxt *dlm)
1854 BUG_ON(!dlm);
1856 mlog(0, "Join domain %s\n", dlm->name);
1858 status = dlm_register_domain_handlers(dlm);
1864 status = dlm_launch_thread(dlm);
1870 status = dlm_launch_recovery_thread(dlm);
1876 dlm_debug_init(dlm);
1878 snprintf(wq_name, O2NM_MAX_NAME_LEN, "dlm_wq-%s", dlm->name);
1879 dlm->dlm_worker = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 0);
1880 if (!dlm->dlm_worker) {
1887 status = dlm_try_to_join_domain(dlm);
1901 mlog(ML_NOTICE, "Timed out joining dlm domain "
1902 "%s after %u msecs\n", dlm->name,
1932 dlm_unregister_domain_handlers(dlm);
1933 dlm_complete_thread(dlm);
1934 dlm_complete_recovery_thread(dlm);
1935 dlm_destroy_dlm_worker(dlm);
1946 struct dlm_ctxt *dlm = NULL;
1948 dlm = kzalloc(sizeof(*dlm), GFP_KERNEL);
1949 if (!dlm) {
1955 dlm->name = kstrdup(domain, GFP_KERNEL);
1956 if (dlm->name == NULL) {
1962 dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES);
1963 if (!dlm->lockres_hash) {
1970 INIT_HLIST_HEAD(dlm_lockres_hash(dlm, i));
1972 dlm->master_hash = (struct hlist_head **)
1974 if (!dlm->master_hash) {
1981 INIT_HLIST_HEAD(dlm_master_hash(dlm, i));
1983 dlm->key = key;
1984 dlm->node_num = o2nm_this_node();
1986 dlm_create_debugfs_subroot(dlm);
1988 spin_lock_init(&dlm->spinlock);
1989 spin_lock_init(&dlm->master_lock);
1990 spin_lock_init(&dlm->ast_lock);
1991 spin_lock_init(&dlm->track_lock);
1992 INIT_LIST_HEAD(&dlm->list);
1993 INIT_LIST_HEAD(&dlm->dirty_list);
1994 INIT_LIST_HEAD(&dlm->reco.resources);
1995 INIT_LIST_HEAD(&dlm->reco.node_data);
1996 INIT_LIST_HEAD(&dlm->purge_list);
1997 INIT_LIST_HEAD(&dlm->dlm_domain_handlers);
1998 INIT_LIST_HEAD(&dlm->tracking_list);
1999 dlm->reco.state = 0;
2001 INIT_LIST_HEAD(&dlm->pending_asts);
2002 INIT_LIST_HEAD(&dlm->pending_basts);
2004 mlog(0, "dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n",
2005 dlm->recovery_map, &(dlm->recovery_map[0]));
2007 bitmap_zero(dlm->recovery_map, O2NM_MAX_NODES);
2008 bitmap_zero(dlm->live_nodes_map, O2NM_MAX_NODES);
2009 bitmap_zero(dlm->domain_map, O2NM_MAX_NODES);
2011 dlm->dlm_thread_task = NULL;
2012 dlm->dlm_reco_thread_task = NULL;
2013 dlm->dlm_worker = NULL;
2014 init_waitqueue_head(&dlm->dlm_thread_wq);
2015 init_waitqueue_head(&dlm->dlm_reco_thread_wq);
2016 init_waitqueue_head(&dlm->reco.event);
2017 init_waitqueue_head(&dlm->ast_wq);
2018 init_waitqueue_head(&dlm->migration_wq);
2019 INIT_LIST_HEAD(&dlm->mle_hb_events);
2021 dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN;
2022 init_waitqueue_head(&dlm->dlm_join_events);
2024 dlm->migrate_done = 0;
2026 dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
2027 dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
2029 atomic_set(&dlm->res_tot_count, 0);
2030 atomic_set(&dlm->res_cur_count, 0);
2032 atomic_set(&dlm->mle_tot_count[i], 0);
2033 atomic_set(&dlm->mle_cur_count[i], 0);
2036 spin_lock_init(&dlm->work_lock);
2037 INIT_LIST_HEAD(&dlm->work_list);
2038 INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
2040 kref_init(&dlm->dlm_refs);
2041 dlm->dlm_state = DLM_CTXT_NEW;
2043 INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks);
2046 kref_read(&dlm->dlm_refs));
2050 if (ret < 0 && dlm) {
2051 if (dlm->master_hash)
2052 dlm_free_pagevec((void **)dlm->master_hash,
2055 if (dlm->lockres_hash)
2056 dlm_free_pagevec((void **)dlm->lockres_hash,
2059 kfree(dlm->name);
2060 kfree(dlm);
2061 dlm = NULL;
2063 return dlm;
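dlm_alloc_ctxt() above is a long constructor: allocate the context, duplicate the domain name, allocate the two hash pagevecs, initialize every spinlock, list, waitqueue, and counter, and on any failure free exactly what was already allocated (lines 2050-2061). A condensed userspace sketch of that allocate-initialize-or-unwind shape, with invented field names and no pagevec handling:

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    #define NUM_BUCKETS 64

    struct node { struct node *next; };

    struct ctxt {
        char *name;
        struct node **lockres_hash;
        struct node **master_hash;
        pthread_mutex_t lock;
        int refs;
        int state;                      /* e.g. NEW / JOINED / LEAVING */
    };

    static struct ctxt *alloc_ctxt(const char *domain)
    {
        struct ctxt *c = calloc(1, sizeof(*c));

        if (!c)
            return NULL;

        c->name = strdup(domain);
        c->lockres_hash = calloc(NUM_BUCKETS, sizeof(*c->lockres_hash));
        c->master_hash = calloc(NUM_BUCKETS, sizeof(*c->master_hash));
        if (!c->name || !c->lockres_hash || !c->master_hash)
            goto err;

        pthread_mutex_init(&c->lock, NULL);
        c->refs = 1;                    /* like kref_init() */
        c->state = 0;                   /* DLM_CTXT_NEW in the kernel */
        return c;

    err:
        /* Free only what was allocated; free(NULL) is a no-op. */
        free(c->master_hash);
        free(c->lockres_hash);
        free(c->name);
        free(c);
        return NULL;
    }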
2101 struct dlm_ctxt *dlm = NULL;
2113 dlm = NULL;
2122 dlm = __dlm_lookup_domain(domain);
2123 if (dlm) {
2124 if (dlm->dlm_state != DLM_CTXT_JOINED) {
2134 if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) {
2144 __dlm_get(dlm);
2145 dlm->num_joins++;
2167 dlm = new_ctxt;
2171 list_add_tail(&dlm->list, &dlm_domains);
2178 dlm->dlm_locking_proto = dlm_protocol;
2179 dlm->fs_locking_proto = *fs_proto;
2181 ret = dlm_join_domain(dlm);
2184 dlm_put(dlm);
2189 *fs_proto = dlm->fs_locking_proto;
2197 dlm = ERR_PTR(ret);
2199 return dlm;
2257 * dlm completes its recovery work, otherwise it may be able to
2258 * acquire locks on resources requiring recovery. Since the dlm can
2267 void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
2273 list_for_each_entry(cb, &dlm->dlm_eviction_callbacks, ec_item) {
2289 void dlm_register_eviction_cb(struct dlm_ctxt *dlm,
2293 list_add_tail(&cb->ec_item, &dlm->dlm_eviction_callbacks);
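The closing fragments show the domain eviction callback list: dlm_register_eviction_cb() appends a callback node to dlm->dlm_eviction_callbacks, and dlm_fire_domain_eviction_callbacks() walks the list when a node is evicted. A small self-contained sketch using a plain singly linked list instead of the kernel's list_head, with invented names:

    #include <stdio.h>

    struct eviction_cb {
        struct eviction_cb *next;
        void (*func)(int node_num, void *data);
        void *data;
    };

    struct domain {
        struct eviction_cb *eviction_callbacks;
    };

    static void register_eviction_cb(struct domain *dom, struct eviction_cb *cb)
    {
        cb->next = dom->eviction_callbacks;      /* kernel appends to the tail */
        dom->eviction_callbacks = cb;
    }

    static void fire_eviction_callbacks(struct domain *dom, int node_num)
    {
        for (struct eviction_cb *cb = dom->eviction_callbacks; cb; cb = cb->next)
            cb->func(node_num, cb->data);
    }

    static void print_eviction(int node_num, void *data)
    {
        (void)data;
        printf("node %d evicted\n", node_num);
    }

    int main(void)
    {
        struct domain dom = { NULL };
        struct eviction_cb cb = { NULL, print_eviction, NULL };

        register_eviction_cb(&dom, &cb);
        fire_eviction_callbacks(&dom, 7);
        return 0;
    }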