// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) 2005 Oracle. All rights reserved.
 */

/* This quorum hack is only here until we transition to some more rational
 * approach that is driven from userspace. Honest. No foolin'.
 *
 * Imagine two nodes lose network connectivity to each other but they're still
 * up and operating in every other way. Presumably a network timeout indicates
 * that a node is broken and should be recovered. They can't both recover each
 * other and both carry on without serialising their access to the file system.
 * They need to decide who is authoritative. Now extend that problem to
 * arbitrary groups of nodes losing connectivity between each other.
 *
 * So we declare that a node which has given up on connecting to a majority
 * of nodes who are still heartbeating will fence itself.
 *
 * There are huge opportunities for races here. After we give up on a node's
 * connection we need to wait long enough to give heartbeat an opportunity
 * to declare the node as truly dead. We also need to be careful with the
 * race between when we see a node start heartbeating and when we connect
 * to it.
 *
 * So nodes that are in this transition put a hold on the quorum decision
 * with a counter. As they fall out of this transition they drop the count
 * and if they're the last, they fire off the decision.
 */

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/reboot.h>

#include "heartbeat.h"
#include "nodemanager.h"
#define MLOG_MASK_PREFIX …
#include "masklog.h"
#include "quorum.h"

/* Single node-wide instance of the quorum bookkeeping state (hold counts,
 * node bitmaps, decision work — body elided from this view). */
static struct o2quo_state {
	…
} o2quo_state;

/* this is horribly heavy-handed. It should instead flip the file
 * system RO and call some userspace script. */
static void o2quo_fence_self(void)
{
	…
}

/* Indicate that a timeout occurred on a heartbeat region write. The
 * other nodes in the cluster may consider us dead at that time so we
 * want to "fence" ourselves so that we don't scribble on the disk
 * after they think they've recovered us. This can't solve all
 * problems related to writeout after recovery but this hack can at
 * least close some of those gaps. When we have real fencing, this can
 * go away as our node would be fenced externally before other nodes
 * begin recovery. */
void o2quo_disk_timeout(void)
{
	…
}

/* Deferred quorum decision; runs once all holds have drained.
 * (work_struct handler — presumably decides whether this node keeps
 * quorum or fences itself; body elided, confirm against implementation.) */
static void o2quo_make_decision(struct work_struct *work)
{
	…
}

/* Take a hold for @node, delaying the quorum decision (see block comment
 * at the top of the file). Paired with o2quo_clear_hold(). */
static void o2quo_set_hold(struct o2quo_state *qs, u8 node)
{
	…
}

/* Drop the hold taken for @node; the last hold to drop fires off the
 * quorum decision. */
static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
{
	…
}

/* as a node comes up we delay the quorum decision until we know the fate of
 * the connection. the hold will be dropped in conn_up or hb_down. it might be
 * perpetuated by conn_err until hb_down. if we already have a conn, we might
 * be dropping a hold that conn_up got. */
void o2quo_hb_up(u8 node)
{
	…
}

/* hb going down releases any holds we might have had due to this node from
 * conn_up, conn_err, or hb_up */
void o2quo_hb_down(u8 node)
{
	…
}

/* this tells us that we've decided that the node is still heartbeating
 * even though we've lost its conn. it must only be called after conn_err
 * and indicates that we must now make a quorum decision in the future,
 * though we might be doing so after waiting for holds to drain. Here
 * we'll be dropping the hold from conn_err. */
void o2quo_hb_still_up(u8 node)
{
	…
}

/* This is analogous to hb_up. as a node's connection comes up we delay the
 * quorum decision until we see it heartbeating. the hold will be dropped in
 * hb_up or hb_down. it might be perpetuated by conn_err until hb_down. if
 * it's already heartbeating we might be dropping a hold that conn_up got.
 */
void o2quo_conn_up(u8 node)
{
	…
}

/* we've decided that we won't ever be connecting to the node again. if it's
 * still heartbeating we grab a hold that will delay decisions until either the
 * node stops heartbeating from hb_down or the caller decides that the node is
 * still up and calls still_up */
void o2quo_conn_err(u8 node)
{
	…
}

/* Module-lifetime setup of the quorum state (body elided from this view). */
void o2quo_init(void)
{
	…
}

/* Module-lifetime teardown; counterpart of o2quo_init() (body elided). */
void o2quo_exit(void)
{
	…
}