pacemaker 2.1.5-a3f44794f94
Scalable High-Availability cluster resource manager
pcmk_sched_primitive.c
/*
 * Copyright 2004-2022 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <stdbool.h>

#include <crm/msg_xml.h>
#include <pacemaker-internal.h>

#include "libpacemaker_private.h"

static void stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
static void start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
static void demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
static void promote_resource(pe_resource_t *rsc, pe_node_t *node,
                             bool optional);
static void assert_role_error(pe_resource_t *rsc, pe_node_t *node,
                              bool optional);

static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
    /* This array lists the immediate next role when transitioning from one
     * role to a target role. For example, when going from Stopped to Promoted,
     * the next role is Unpromoted, because the resource must be started before
     * it can be promoted. The current state then becomes Started, which is fed
     * into this array again, giving a next role of Promoted.
     *
     * Current role       Immediate next role   Final target role
     * ------------       -------------------   -----------------
     */
    /* Unknown */    { RSC_ROLE_UNKNOWN,    /* Unknown */
                       RSC_ROLE_STOPPED,    /* Stopped */
                       RSC_ROLE_STOPPED,    /* Started */
                       RSC_ROLE_STOPPED,    /* Unpromoted */
                       RSC_ROLE_STOPPED,    /* Promoted */
                     },
    /* Stopped */    { RSC_ROLE_STOPPED,    /* Unknown */
                       RSC_ROLE_STOPPED,    /* Stopped */
                       RSC_ROLE_STARTED,    /* Started */
                       RSC_ROLE_UNPROMOTED, /* Unpromoted */
                       RSC_ROLE_UNPROMOTED, /* Promoted */
                     },
    /* Started */    { RSC_ROLE_STOPPED,    /* Unknown */
                       RSC_ROLE_STOPPED,    /* Stopped */
                       RSC_ROLE_STARTED,    /* Started */
                       RSC_ROLE_UNPROMOTED, /* Unpromoted */
                       RSC_ROLE_PROMOTED,   /* Promoted */
                     },
    /* Unpromoted */ { RSC_ROLE_STOPPED,    /* Unknown */
                       RSC_ROLE_STOPPED,    /* Stopped */
                       RSC_ROLE_STOPPED,    /* Started */
                       RSC_ROLE_UNPROMOTED, /* Unpromoted */
                       RSC_ROLE_PROMOTED,   /* Promoted */
                     },
    /* Promoted */   { RSC_ROLE_STOPPED,    /* Unknown */
                       RSC_ROLE_UNPROMOTED, /* Stopped */
                       RSC_ROLE_UNPROMOTED, /* Started */
                       RSC_ROLE_UNPROMOTED, /* Unpromoted */
                       RSC_ROLE_PROMOTED,   /* Promoted */
                     },
};

typedef void (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *node,
                                  bool optional);

static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
    /* This array lists the function needed to transition directly from one
     * role to another. NULL indicates that nothing is needed.
     *
     * Current role     Transition function   Next role
     * ------------     -------------------   ---------
     */
    /* Unknown */    { assert_role_error,     /* Unknown */
                       stop_resource,         /* Stopped */
                       assert_role_error,     /* Started */
                       assert_role_error,     /* Unpromoted */
                       assert_role_error,     /* Promoted */
                     },
    /* Stopped */    { assert_role_error,     /* Unknown */
                       NULL,                  /* Stopped */
                       start_resource,        /* Started */
                       start_resource,        /* Unpromoted */
                       assert_role_error,     /* Promoted */
                     },
    /* Started */    { assert_role_error,     /* Unknown */
                       stop_resource,         /* Stopped */
                       NULL,                  /* Started */
                       NULL,                  /* Unpromoted */
                       promote_resource,      /* Promoted */
                     },
    /* Unpromoted */ { assert_role_error,     /* Unknown */
                       stop_resource,         /* Stopped */
                       stop_resource,         /* Started */
                       NULL,                  /* Unpromoted */
                       promote_resource,      /* Promoted */
                     },
    /* Promoted */   { assert_role_error,     /* Unknown */
                       demote_resource,       /* Stopped */
                       demote_resource,       /* Started */
                       demote_resource,       /* Unpromoted */
                       NULL,                  /* Promoted */
                     },
};
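
/* Illustrative sketch only, not part of the scheduler: how the two tables
 * combine. rsc_state_matrix yields each intermediate role and
 * rsc_action_matrix supplies the scheduling function for each hop. The helper
 * below is hypothetical and compiled out; it exists only to show the lookup
 * pattern.
 */
#if 0
static void
example_walk_to_promoted(pe_resource_t *rsc, pe_node_t *node)
{
    enum rsc_role_e role = RSC_ROLE_STOPPED;

    while (role != RSC_ROLE_PROMOTED) {
        /* First iteration: Stopped -> Unpromoted via start_resource()
         * Second iteration: Unpromoted -> Promoted via promote_resource()
         */
        enum rsc_role_e next = rsc_state_matrix[role][RSC_ROLE_PROMOTED];
        rsc_transition_fn fn = rsc_action_matrix[role][next];

        if (fn != NULL) {
            fn(rsc, node, false); // false = schedule as a required action
        }
        role = next;
    }
}
#endif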

/*!
 * \internal
 * \brief Get a resource's allowed nodes as a list sorted by node score
 *
 * \param[in] rsc  Resource to check
 *
 * \return List of allowed nodes sorted by node score
 */
static GList *
sorted_allowed_nodes(const pe_resource_t *rsc)
{
    if (rsc->allowed_nodes != NULL) {
        GList *nodes = g_hash_table_get_values(rsc->allowed_nodes);

        if (nodes != NULL) {
            return pcmk__sort_nodes(nodes, pe__current_node(rsc));
        }
    }
    return NULL;
}

/*!
 * \internal
 * \brief Assign a resource to its best allowed node, if possible
 *
 * \param[in,out] rsc     Resource to choose a node for
 * \param[in]     prefer  If not NULL, prefer this node when scores are equal
 *
 * \return true if \p rsc could be assigned to a node, otherwise false
 */
static bool
assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer)
{
    GList *nodes = NULL;
    pe_node_t *chosen = NULL;
    pe_node_t *best = NULL;
    bool result = false;
    const pe_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc);

    if (prefer == NULL) {
        prefer = most_free_node;
    }

    if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
        // We've already finished assignment of resources to nodes
        return rsc->allocated_to != NULL;
    }

    // Sort allowed nodes by weight
    nodes = sorted_allowed_nodes(rsc);
    if (nodes != NULL) {
        best = (pe_node_t *) nodes->data; // First node has best score
    }

    if ((prefer != NULL) && (nodes != NULL)) {
        // Get the allowed node version of prefer
        chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);

        if (chosen == NULL) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
                         pe__node_name(prefer), rsc->id);

        /* Favor the preferred node as long as its weight is at least as good
         * as the best allowed node's.
         *
         * An alternative would be to favor the preferred node even if the best
         * node is better, when the best node's weight is less than INFINITY.
         */
        } else if (chosen->weight < best->weight) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
                         pe__node_name(chosen), rsc->id);
            chosen = NULL;

        } else if (!pcmk__node_available(chosen, true, false)) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
                         pe__node_name(chosen), rsc->id);
            chosen = NULL;

        } else {
            pe_rsc_trace(rsc,
                         "Chose preferred node %s for %s (ignoring %d candidates)",
                         pe__node_name(chosen), rsc->id, g_list_length(nodes));
        }
    }

    if ((chosen == NULL) && (best != NULL)) {
        /* Either there is no preferred node, or the preferred node is not
         * suitable, but another node is allowed to run the resource.
         */

        chosen = best;

        if (!pe_rsc_is_unique_clone(rsc->parent)
            && (chosen->weight > 0) // Zero not acceptable
            && pcmk__node_available(chosen, false, false)) {
            /* If the resource is already running on a node, prefer that node
             * if it is just as good as the chosen node.
             *
             * We don't do this for unique clone instances, because
             * distribute_children() has already assigned instances to their
             * running nodes when appropriate, and if we get here, we don't
             * want remaining unassigned instances to prefer a node that's
             * already running another instance.
             */
            pe_node_t *running = pe__current_node(rsc);

            if (running == NULL) {
                // Nothing to do

            } else if (!pcmk__node_available(running, true, false)) {
                pe_rsc_trace(rsc,
                             "Current node for %s (%s) can't run resources",
                             rsc->id, pe__node_name(running));

            } else {
                int nodes_with_best_score = 1;

                for (GList *iter = nodes->next; iter; iter = iter->next) {
                    pe_node_t *allowed = (pe_node_t *) iter->data;

                    if (allowed->weight != chosen->weight) {
                        // The nodes are sorted by weight, so no more are equal
                        break;
                    }
                    if (pe__same_node(allowed, running)) {
                        // Scores are equal, so prefer the current node
                        chosen = allowed;
                    }
                    nodes_with_best_score++;
                }

                if (nodes_with_best_score > 1) {
                    do_crm_log(((chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO),
                               "Chose %s for %s from %d nodes with score %s",
                               pe__node_name(chosen), rsc->id,
                               nodes_with_best_score,
                               pcmk_readable_score(chosen->weight));
                }
            }
        }

        pe_rsc_trace(rsc, "Chose %s for %s from %d candidates",
                     pe__node_name(chosen), rsc->id, g_list_length(nodes));
    }

    result = pcmk__finalize_assignment(rsc, chosen, false);
    g_list_free(nodes);
    return result;
}
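
/* For example (hypothetical scores): if node1 and node2 are both allowed with
 * score 100 and the resource is already active on node2, the tie-break above
 * keeps node2 as the choice, avoiding a needless move. Unique clone instances
 * skip the tie-break because distribute_children() has already placed
 * instances on their running nodes where appropriate.
 */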

/*!
 * \internal
 * \brief Apply a "this with" colocation to a resource's allowed node scores
 *
 * \param[in,out] data       Colocation to apply
 * \param[in,out] user_data  Resource being assigned
 */
static void
apply_this_with(gpointer data, gpointer user_data)
{
    pcmk__colocation_t *colocation = (pcmk__colocation_t *) data;
    pe_resource_t *rsc = (pe_resource_t *) user_data;

    GHashTable *archive = NULL;
    pe_resource_t *other = colocation->primary;

    // In certain cases, we will need to revert the node scores
    if ((colocation->dependent_role >= RSC_ROLE_PROMOTED)
        || ((colocation->score < 0) && (colocation->score > -INFINITY))) {
        archive = pcmk__copy_node_table(rsc->allowed_nodes);
    }

    pe_rsc_trace(rsc,
                 "%s: Assigning colocation %s primary %s first "
                 "(score=%d role=%s)",
                 rsc->id, colocation->id, other->id,
                 colocation->score, role2text(colocation->dependent_role));
    other->cmds->assign(other, NULL);

    // Apply the colocation score to this resource's allowed node scores
    rsc->cmds->apply_coloc_score(rsc, other, colocation, true);
    if ((archive != NULL)
        && !pcmk__any_node_available(rsc->allowed_nodes)) {
        pe_rsc_info(rsc,
                    "%s: Reverting scores from colocation with %s "
                    "because no nodes allowed",
                    rsc->id, other->id);
        g_hash_table_destroy(rsc->allowed_nodes);
        rsc->allowed_nodes = archive;
        archive = NULL;
    }
    if (archive != NULL) {
        g_hash_table_destroy(archive);
    }
}

/*!
 * \internal
 * \brief Apply a "with this" colocation to a resource's allowed node scores
 *
 * \param[in,out] data       Colocation to apply
 * \param[in,out] user_data  Resource being assigned
 */
static void
apply_with_this(void *data, void *user_data)
{
    pcmk__colocation_t *colocation = (pcmk__colocation_t *) data;
    pe_resource_t *rsc = (pe_resource_t *) user_data;

    pe_resource_t *other = colocation->dependent;
    const float factor = colocation->score / (float) INFINITY;

    if (!pcmk__colocation_has_influence(colocation, NULL)) {
        return;
    }
    pe_rsc_trace(rsc,
                 "%s: Incorporating attenuated %s assignment scores due "
                 "to colocation %s", rsc->id, other->id, colocation->id);
    pcmk__add_colocated_node_scores(other, rsc->id, &rsc->allowed_nodes,
                                    colocation->node_attribute, factor,
                                    pcmk__coloc_select_active);
}

/*!
 * \internal
 * \brief Update a Pacemaker Remote node's state based on its connection's
 *        assignment
 *
 * \param[in] connection  Remote connection resource that has been assigned
 */
static void
remote_connection_assigned(const pe_resource_t *connection)
{
    pe_node_t *remote_node = pe_find_node(connection->cluster->nodes,
                                          connection->id);

    CRM_CHECK(remote_node != NULL, return);

    if ((connection->allocated_to != NULL)
        && (connection->next_role != RSC_ROLE_STOPPED)) {

        crm_trace("Pacemaker Remote node %s will be online",
                  remote_node->details->id);
        remote_node->details->online = TRUE;
        if (remote_node->details->unseen) {
            // Avoid unnecessary fence, since we will attempt connection
            remote_node->details->unclean = FALSE;
        }

    } else {
        crm_trace("Pacemaker Remote node %s will be shut down "
                  "(%sassigned connection's next role is %s)",
                  remote_node->details->id,
                  ((connection->allocated_to == NULL)? "un" : ""),
                  role2text(connection->next_role));
        remote_node->details->shutdown = TRUE;
    }
}

/*!
 * \internal
 * \brief Assign a primitive resource to a node
 *
 * \param[in,out] rsc     Resource to assign
 * \param[in]     prefer  Node to prefer, if all else is equal
 *
 * \return Node that \p rsc is assigned to, if assigned entirely to one node
 */
pe_node_t *
pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer)
{
    CRM_ASSERT(rsc != NULL);

    // Never assign a child without parent being assigned first
    if ((rsc->parent != NULL)
        && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
        pe_rsc_debug(rsc, "%s: Assigning parent %s first",
                     rsc->id, rsc->parent->id);
        rsc->parent->cmds->assign(rsc->parent, prefer);
    }

    if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
        return rsc->allocated_to; // Assignment has already been done
    }

    // Ensure we detect assignment loops
    if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
        pe_rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id);
        return NULL;
    }
    pe__set_resource_flags(rsc, pe_rsc_allocating);

    pe__show_node_weights(true, rsc, "Pre-assignment", rsc->allowed_nodes,
                          rsc->cluster);

    g_list_foreach(rsc->rsc_cons, apply_this_with, rsc);
    pe__show_node_weights(true, rsc, "Post-this-with", rsc->allowed_nodes,
                          rsc->cluster);

    g_list_foreach(rsc->rsc_cons_lhs, apply_with_this, rsc);

    if (rsc->next_role == RSC_ROLE_STOPPED) {
        pe_rsc_trace(rsc,
                     "Banning %s from all nodes because it will be stopped",
                     rsc->id);
        resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE,
                          rsc->cluster);

    } else if ((rsc->next_role > rsc->role)
               && !pcmk_is_set(rsc->cluster->flags, pe_flag_have_quorum)
               && (rsc->cluster->no_quorum_policy == no_quorum_freeze)) {
        crm_notice("Resource %s cannot be elevated from %s to %s due to "
                   "no-quorum-policy=freeze",
                   rsc->id, role2text(rsc->role), role2text(rsc->next_role));
        pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze");
    }

    pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
                          rsc, __func__, rsc->allowed_nodes, rsc->cluster);

    // Unmanage resource if fencing is enabled but no device is configured
    if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)
        && !pcmk_is_set(rsc->cluster->flags, pe_flag_have_stonith_resource)) {
        pe__clear_resource_flags(rsc, pe_rsc_managed);
    }

    if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        // Unmanaged resources stay on their current node
        const char *reason = NULL;
        pe_node_t *assign_to = NULL;

        pe__set_next_role(rsc, rsc->role, "unmanaged");
        assign_to = pe__current_node(rsc);
        if (assign_to == NULL) {
            reason = "inactive";
        } else if (rsc->role == RSC_ROLE_PROMOTED) {
            reason = "promoted";
        } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
            reason = "failed";
        } else {
            reason = "active";
        }
        pe_rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id,
                    (assign_to? assign_to->details->uname : "no node"),
                    reason);
        pcmk__finalize_assignment(rsc, assign_to, true);

    } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stop_everything)) {
        pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources", rsc->id);
        pcmk__finalize_assignment(rsc, NULL, true);

    } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
               && assign_best_node(rsc, prefer)) {
        // Assignment successful

    } else if (rsc->allocated_to == NULL) {
        if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
            pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
        } else if (rsc->running_on != NULL) {
            pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
        }

    } else {
        pe_rsc_debug(rsc, "%s: pre-assigned to %s", rsc->id,
                     pe__node_name(rsc->allocated_to));
    }

    pe__clear_resource_flags(rsc, pe_rsc_allocating);

    if (rsc->is_remote_node) {
        remote_connection_assigned(rsc);
    }

    return rsc->allocated_to;
}
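
/* A minimal sketch of the calling convention (assumed context: rsc is a
 * primitive whose cmds table already points at the primitive methods).
 * Callers normally reach assignment through the method table rather than
 * calling pcmk__primitive_assign() directly. This helper is hypothetical and
 * compiled out.
 */
#if 0
static void
example_assign_call(pe_resource_t *rsc)
{
    // NULL means no preferred node; the best-scored allowed node wins
    pe_node_t *assigned = rsc->cmds->assign(rsc, NULL);

    if (assigned == NULL) {
        crm_info("%s could not be assigned to any node", rsc->id);
    }
}
#endif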

/*!
 * \internal
 * \brief Schedule actions to bring a resource down and back up to its role
 *
 * \param[in,out] rsc           Resource to restart
 * \param[in,out] current       Node that resource should be brought down on
 * \param[in]     need_stop     Whether the resource must be stopped
 * \param[in]     need_promote  Whether the resource must be promoted
 */
static void
schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
                         bool need_stop, bool need_promote)
{
    enum rsc_role_e role = rsc->role;
    enum rsc_role_e next_role;
    rsc_transition_fn fn = NULL;

    pe__set_resource_flags(rsc, pe_rsc_restarting);

    // Bring resource down to a stop on its current node
    while (role != RSC_ROLE_STOPPED) {
        next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
        pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
                     (need_stop? "required" : "optional"), rsc->id,
                     role2text(role), role2text(next_role));
        fn = rsc_action_matrix[role][next_role];
        if (fn == NULL) {
            break;
        }
        fn(rsc, current, !need_stop);
        role = next_role;
    }

    // Bring resource up to its next role on its next node
    while ((rsc->role <= rsc->next_role) && (role != rsc->role)
           && !pcmk_is_set(rsc->flags, pe_rsc_block)) {
        bool required = need_stop;

        next_role = rsc_state_matrix[role][rsc->role];
        if ((next_role == RSC_ROLE_PROMOTED) && need_promote) {
            required = true;
        }
        pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
                     (required? "required" : "optional"), rsc->id,
                     role2text(role), role2text(next_role));
        fn = rsc_action_matrix[role][next_role];
        if (fn == NULL) {
            break;
        }
        fn(rsc, rsc->allocated_to, !required);
        role = next_role;
    }

    pe__clear_resource_flags(rsc, pe_rsc_restarting);
}
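
/* Worked example (hypothetical resource): a failed resource that is Promoted
 * on node1 but newly assigned to node2 is taken down on node1 via
 * rsc_action_matrix[Promoted][Unpromoted] (demote) and
 * rsc_action_matrix[Unpromoted][Stopped] (stop), then brought back up to its
 * pre-restart role on node2 via start and promote; any further role change is
 * left to schedule_role_transition_actions().
 */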

/*!
 * \internal
 * \brief Set a resource's next role to its default, if not already set
 *
 * \param[in,out] rsc  Resource to set next role for
 *
 * \return "explicit" if next role was already set, otherwise "implicit"
 */
static const char *
set_default_next_role(pe_resource_t *rsc)
{
    if (rsc->next_role != RSC_ROLE_UNKNOWN) {
        return "explicit";
    }

    if (rsc->allocated_to == NULL) {
        pe__set_next_role(rsc, RSC_ROLE_STOPPED, "assignment");
    } else {
        pe__set_next_role(rsc, RSC_ROLE_STARTED, "assignment");
    }
    return "implicit";
}

/*!
 * \internal
 * \brief Create an action to represent an already pending start
 *
 * \param[in,out] rsc  Resource to create start action for
 */
static void
create_pending_start(pe_resource_t *rsc)
{
    pe_action_t *start = NULL;

    pe_rsc_trace(rsc,
                 "Creating action for %s to represent already pending start",
                 rsc->id);
    start = start_action(rsc, rsc->allocated_to, TRUE);
    pe__set_action_flags(start, pe_action_print_always);
}

/*!
 * \internal
 * \brief Schedule actions needed to take a resource to its next role
 *
 * \param[in,out] rsc  Resource to schedule actions for
 */
static void
schedule_role_transition_actions(pe_resource_t *rsc)
{
    enum rsc_role_e role = rsc->role;

    while (role != rsc->next_role) {
        enum rsc_role_e next_role = rsc_state_matrix[role][rsc->next_role];
        rsc_transition_fn fn = NULL;

        pe_rsc_trace(rsc,
                     "Creating action to take %s from %s to %s (ending at %s)",
                     rsc->id, role2text(role), role2text(next_role),
                     role2text(rsc->next_role));
        fn = rsc_action_matrix[role][next_role];
        if (fn == NULL) {
            break;
        }
        fn(rsc, rsc->allocated_to, false);
        role = next_role;
    }
}

/*!
 * \internal
 * \brief Create all actions needed for a given primitive resource
 *
 * \param[in,out] rsc  Primitive resource to create actions for
 */
void
pcmk__primitive_create_actions(pe_resource_t *rsc)
{
    bool need_stop = false;
    bool need_promote = false;
    bool is_moving = false;
    bool allow_migrate = false;
    bool multiply_active = false;

    pe_node_t *current = NULL;
    unsigned int num_all_active = 0;
    unsigned int num_clean_active = 0;
    const char *next_role_source = NULL;

    CRM_ASSERT(rsc != NULL);

    next_role_source = set_default_next_role(rsc);
    pe_rsc_trace(rsc,
                 "Creating all actions for %s transition from %s to %s "
                 "(%s) on %s",
                 rsc->id, role2text(rsc->role), role2text(rsc->next_role),
                 next_role_source, pe__node_name(rsc->allocated_to));

    current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);

    g_list_foreach(rsc->dangling_migrations, pcmk__abort_dangling_migration,
                   rsc);

    if ((current != NULL) && (rsc->allocated_to != NULL)
        && (current->details != rsc->allocated_to->details)
        && (rsc->next_role >= RSC_ROLE_STARTED)) {

        pe_rsc_trace(rsc, "Moving %s from %s to %s",
                     rsc->id, pe__node_name(current),
                     pe__node_name(rsc->allocated_to));
        is_moving = true;
        allow_migrate = pcmk__rsc_can_migrate(rsc, current);

        // This is needed even if migrating (though I'm not sure why ...)
        need_stop = true;
    }

    // Check whether resource is partially migrated and/or multiply active
    if ((rsc->partial_migration_source != NULL)
        && (rsc->partial_migration_target != NULL)
        && allow_migrate && (num_all_active == 2)
        && pe__same_node(current, rsc->partial_migration_source)
        && pe__same_node(rsc->allocated_to, rsc->partial_migration_target)) {
        /* A partial migration is in progress, and the migration target remains
         * the same as when the migration began.
         */
        pe_rsc_trace(rsc, "Partial migration of %s from %s to %s will continue",
                     rsc->id, pe__node_name(rsc->partial_migration_source),
                     pe__node_name(rsc->partial_migration_target));

    } else if ((rsc->partial_migration_source != NULL)
               || (rsc->partial_migration_target != NULL)) {
        // A partial migration is in progress but can't be continued

        if (num_all_active > 2) {
            // The resource is migrating *and* multiply active!
            crm_notice("Forcing recovery of %s because it is migrating "
                       "from %s to %s and possibly active elsewhere",
                       rsc->id, pe__node_name(rsc->partial_migration_source),
                       pe__node_name(rsc->partial_migration_target));
        } else {
            // The migration source or target isn't available
            crm_notice("Forcing recovery of %s because it can no longer "
                       "migrate from %s to %s",
                       rsc->id, pe__node_name(rsc->partial_migration_source),
                       pe__node_name(rsc->partial_migration_target));
        }
        need_stop = true;
        rsc->partial_migration_source = rsc->partial_migration_target = NULL;
        allow_migrate = false;

    } else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
        multiply_active = (num_all_active > 1);

    } else {
        /* If a resource has "requires" set to nothing or quorum, don't consider
         * it active on unclean nodes (similar to how all resources behave when
         * stonith-enabled is false). We can start such resources elsewhere
         * before fencing completes, and if we considered the resource active on
         * the failed node, we would attempt recovery for being active on
         * multiple nodes.
         */
        multiply_active = (num_clean_active > 1);
    }

    if (multiply_active) {
        const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);

        // Resource was (possibly) incorrectly multiply active
        pe_proc_err("%s resource %s might be active on %u nodes (%s)",
                    pcmk__s(class, "Untyped"), rsc->id, num_all_active,
                    recovery2text(rsc->recovery_type));
        crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ"
                   "#Resource_is_Too_Active for more information");

        switch (rsc->recovery_type) {
            case recovery_stop_start:
                need_stop = true;
                break;
            case recovery_stop_unexpected:
                need_stop = true; // stop_resource() will skip expected node
                pe__set_resource_flags(rsc, pe_rsc_stop_unexpected);
                break;
            default:
                break;
        }

    } else {
        pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected);
    }

    if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
        create_pending_start(rsc);
    }

    if (is_moving) {
        // Remaining tests are only for resources staying where they are

    } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
        if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
            need_stop = true;
            pe_rsc_trace(rsc, "Recovering %s", rsc->id);
        } else {
            pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
            if (rsc->next_role == RSC_ROLE_PROMOTED) {
                need_promote = true;
            }
        }

    } else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
        pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
        need_stop = true;

    } else if ((rsc->role > RSC_ROLE_STARTED) && (current != NULL)
               && (rsc->allocated_to != NULL)) {
        pe_action_t *start = NULL;

        pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
                     rsc->id);
        start = start_action(rsc, rsc->allocated_to, TRUE);
        if (!pcmk_is_set(start->flags, pe_action_optional)) {
            // Recovery of a promoted resource
            pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
            need_stop = true;
        }
    }

    // Create any actions needed to bring resource down and back up to same role
    schedule_restart_actions(rsc, current, need_stop, need_promote);

    // Create any actions needed to take resource from this role to the next
    schedule_role_transition_actions(rsc);

    pcmk__create_recurring_actions(rsc);

    if (allow_migrate) {
        pcmk__create_migration_actions(rsc, current);
    }
}

/*!
 * \internal
 * \brief Ban a resource from any allowed nodes that are Pacemaker Remote nodes
 *
 * \param[in] rsc  Resource to ban
 */
static void
rsc_avoids_remote_nodes(const pe_resource_t *rsc)
{
    GHashTableIter iter;
    pe_node_t *node = NULL;

    g_hash_table_iter_init(&iter, rsc->allowed_nodes);
    while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
        if (node->details->remote_rsc != NULL) {
            node->weight = -INFINITY;
        }
    }
}

/*!
 * \internal
 * \brief Get a resource's allowed nodes as a list (sorted by node name when
 *        not running as a daemon)
 *
 * \param[in] rsc  Resource to check
 *
 * \return List of \p rsc's allowed nodes
 */
static GList *
allowed_nodes_as_list(const pe_resource_t *rsc)
{
    GList *allowed_nodes = NULL;

    if (rsc->allowed_nodes) {
        allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
    }

    if (!pcmk__is_daemon) {
        allowed_nodes = g_list_sort(allowed_nodes, pe__cmp_node_name);
    }

    return allowed_nodes;
}

/*!
 * \internal
 * \brief Create implicit constraints needed for a primitive resource
 *
 * \param[in,out] rsc  Primitive resource to create implicit constraints for
 */
void
pcmk__primitive_internal_constraints(pe_resource_t *rsc)
{
    pe_resource_t *top = NULL;
    GList *allowed_nodes = NULL;
    bool check_unfencing = false;
    bool check_utilization = false;

    CRM_ASSERT(rsc != NULL);

    if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        pe_rsc_trace(rsc,
                     "Skipping implicit constraints for unmanaged resource %s",
                     rsc->id);
        return;
    }

    top = uber_parent(rsc);

    // Whether resource requires unfencing
    check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
                      && pcmk_is_set(rsc->cluster->flags,
                                     pe_flag_enable_unfencing)
                      && pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing);

    // Whether a non-default placement strategy is used
    check_utilization = (g_hash_table_size(rsc->utilization) > 0)
                        && !pcmk__str_eq(rsc->cluster->placement_strategy,
                                         "default", pcmk__str_casei);

    // Order stops before starts (i.e. restart)
    pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                       rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                       pe_order_optional|pe_order_implies_then|pe_order_restart,
                       rsc->cluster);

    // Promotable ordering: demote before stop, start before promote
    if (pcmk_is_set(top->flags, pe_rsc_promotable)
        || (rsc->role > RSC_ROLE_UNPROMOTED)) {

        pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
                           rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                           pe_order_promoted_implies_first, rsc->cluster);

        pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                           rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
                           pe_order_runnable_left, rsc->cluster);
    }

    // Don't clear resource history if probing on same node
    pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0),
                       NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
                       NULL, pe_order_same_node|pe_order_then_cancels_first,
                       rsc->cluster);

    // Certain checks need allowed nodes
    if (check_unfencing || check_utilization || (rsc->container != NULL)) {
        allowed_nodes = allowed_nodes_as_list(rsc);
    }

    if (check_unfencing) {
        g_list_foreach(allowed_nodes, pcmk__order_restart_vs_unfence, rsc);
    }

    if (check_utilization) {
        pcmk__create_utilization_constraints(rsc, allowed_nodes);
    }

    if (rsc->container != NULL) {
        pe_resource_t *remote_rsc = NULL;

        if (rsc->is_remote_node) {
            // rsc is the implicit remote connection for a guest or bundle node

            /* Guest resources are not allowed to run on Pacemaker Remote nodes,
             * to avoid nesting remotes. However, bundles are allowed.
             */
            if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
                rsc_avoids_remote_nodes(rsc->container);
            }

            /* If someone cleans up a guest or bundle node's container, we will
             * likely schedule a (re-)probe of the container and recovery of the
             * connection. Order the connection stop after the container probe,
             * so that if we detect the container running, we will trigger a new
             * transition and avoid the unnecessary recovery.
             */
            pcmk__order_resource_actions(rsc->container, RSC_STATUS,
                                         rsc, RSC_STOP, pe_order_optional);

        /* A user can specify that a resource must start on a Pacemaker Remote
         * node by explicitly configuring it with the container=NODENAME
         * meta-attribute. This is of questionable merit, since location
         * constraints can accomplish the same thing. But we support it, so here
         * we check whether a resource (that is not itself a remote connection)
         * has container set to a remote node or guest node resource.
         */
        } else if (rsc->container->is_remote_node) {
            remote_rsc = rsc->container;
        } else {
            remote_rsc = pe__resource_contains_guest_node(rsc->cluster,
                                                          rsc->container);
        }

        if (remote_rsc != NULL) {
            /* Force the resource on the Pacemaker Remote node instead of
             * colocating the resource with the container resource.
             */
            for (GList *item = allowed_nodes; item; item = item->next) {
                pe_node_t *node = item->data;

                if (node->details->remote_rsc != remote_rsc) {
                    node->weight = -INFINITY;
                }
            }

        } else {
            /* This resource is either a filler for a container that does NOT
             * represent a Pacemaker Remote node, or a Pacemaker Remote
             * connection resource for a guest node or bundle.
             */
            int score;

            crm_trace("Order and colocate %s relative to its container %s",
                      rsc->id, rsc->container->id);

            pcmk__new_ordering(rsc->container,
                               pcmk__op_key(rsc->container->id, RSC_START, 0),
                               NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
                               NULL,
                               pe_order_implies_then|pe_order_runnable_left,
                               rsc->cluster);

            pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                               rsc->container,
                               pcmk__op_key(rsc->container->id, RSC_STOP, 0),
                               NULL, pe_order_implies_first, rsc->cluster);

            if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
                score = 10000;    /* Highly preferred but not essential */
            } else {
                score = INFINITY; /* Force them to run on the same host */
            }
            pcmk__new_colocation("resource-with-container", NULL, score, rsc,
                                 rsc->container, NULL, NULL, true,
                                 rsc->cluster);
        }
    }

    if (rsc->is_remote_node
        || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
        /* Remote connections and fencing devices are not allowed to run on
         * Pacemaker Remote nodes
         */
        rsc_avoids_remote_nodes(rsc);
    }
    g_list_free(allowed_nodes);
}

/*!
 * \internal
 * \brief Apply a colocation's score to node scores or resource priority
 *
 * \param[in,out] dependent      Dependent resource in colocation
 * \param[in]     primary        Primary resource in colocation
 * \param[in]     colocation     Colocation constraint to apply
 * \param[in]     for_dependent  true if called on behalf of dependent
 */
void
pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
                                  const pe_resource_t *primary,
                                  const pcmk__colocation_t *colocation,
                                  bool for_dependent)
{
    enum pcmk__coloc_affects filter_results;

    CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
              return);

    if (for_dependent) {
        // Always process on behalf of primary resource
        primary->cmds->apply_coloc_score(dependent, primary, colocation, false);
        return;
    }

    filter_results = pcmk__colocation_affects(dependent, primary, colocation,
                                              false);
    pe_rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)",
                 ((colocation->score > 0)? "Colocating" : "Anti-colocating"),
                 dependent->id, primary->id, colocation->id, colocation->score,
                 filter_results);

    switch (filter_results) {
        case pcmk__coloc_affects_role:
            pcmk__apply_coloc_to_priority(dependent, primary, colocation);
            break;
        case pcmk__coloc_affects_location:
            pcmk__apply_coloc_to_weights(dependent, primary, colocation);
            break;
        default: // pcmk__coloc_affects_nothing
            return;
    }
}

/*!
 * \internal
 * \brief Return action flags for a given primitive resource action
 *
 * \param[in,out] action  Action to get flags for
 * \param[in]     node    Ignored for primitives
 *
 * \return Flags appropriate to \p action
 */
enum pe_action_flags
pcmk__primitive_action_flags(pe_action_t *action, const pe_node_t *node)
{
    CRM_ASSERT(action != NULL);
    return action->flags;
}

/*!
 * \internal
 * \brief Check whether a node is a multiply active resource's expected node
 *
 * \param[in] rsc   Resource to check
 * \param[in] node  Node to check
 *
 * \return true if \p rsc is multiply active with multiple-active set to
 *         stop_unexpected, and \p node is the node where it will remain active
 */
static bool
is_expected_node(const pe_resource_t *rsc, const pe_node_t *node)
{
    return pcmk_all_flags_set(rsc->flags,
                              pe_rsc_stop_unexpected|pe_rsc_restarting)
           && (rsc->next_role > RSC_ROLE_STOPPED)
           && pe__same_node(rsc->allocated_to, node);
}

/*!
 * \internal
 * \brief Schedule actions needed to stop a resource wherever it is active
 *
 * \param[in,out] rsc       Resource being stopped
 * \param[in]     node      Node where resource is being stopped (ignored)
 * \param[in]     optional  Whether actions should be optional
 */
static void
stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
{
    for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
        pe_node_t *current = (pe_node_t *) iter->data;
        pe_action_t *stop = NULL;

        if (is_expected_node(rsc, current)) {
            /* We are scheduling restart actions for a multiply active resource
             * with multiple-active=stop_unexpected, and this is where it should
             * not be stopped.
             */
            pe_rsc_trace(rsc,
                         "Skipping stop of multiply active resource %s "
                         "on expected node %s",
                         rsc->id, pe__node_name(current));
            continue;
        }

        if (rsc->partial_migration_target != NULL) {
            // Continue migration if node originally was and remains target
            if (pe__same_node(current, rsc->partial_migration_target)
                && pe__same_node(current, rsc->allocated_to)) {
                pe_rsc_trace(rsc,
                             "Skipping stop of %s on %s "
                             "because partial migration there will continue",
                             rsc->id, pe__node_name(current));
                continue;
            } else {
                pe_rsc_trace(rsc,
                             "Forcing stop of %s on %s "
                             "because migration target changed",
                             rsc->id, pe__node_name(current));
                optional = false;
            }
        }

        pe_rsc_trace(rsc, "Scheduling stop of %s on %s",
                     rsc->id, pe__node_name(current));
        stop = stop_action(rsc, current, optional);

        if (rsc->allocated_to == NULL) {
            pe_action_set_reason(stop, "node availability", true);
        } else if (pcmk_all_flags_set(rsc->flags, pe_rsc_restarting
                                                  |pe_rsc_stop_unexpected)) {
            /* We are stopping a multiply active resource on a node that is
             * not its expected node, and we are still scheduling restart
             * actions, so the stop is for being multiply active.
             */
            pe_action_set_reason(stop, "being multiply active", true);
        }

        if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
            pe__clear_action_flags(stop, pe_action_runnable);
        }

        if (pcmk_is_set(rsc->cluster->flags, pe_flag_remove_after_stop)) {
            pcmk__schedule_cleanup(rsc, current, optional);
        }

        if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
            pe_action_t *unfence = pe_fence_op(current, "on", true, NULL, false,
                                               rsc->cluster);

            order_actions(stop, unfence, pe_order_implies_first);
            if (!pcmk__node_unfenced(current)) {
                pe_proc_err("Stopping %s until %s can be unfenced",
                            rsc->id, pe__node_name(current));
            }
        }
    }
}

/*!
 * \internal
 * \brief Schedule actions needed to start a resource on a node
 *
 * \param[in,out] rsc       Resource being started
 * \param[in,out] node      Node where resource should be started
 * \param[in]     optional  Whether actions should be optional
 */
static void
start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
{
    pe_action_t *start = NULL;

    CRM_ASSERT(node != NULL);

    pe_rsc_trace(rsc, "Scheduling %s start of %s on %s (score %d)",
                 (optional? "optional" : "required"), rsc->id,
                 pe__node_name(node), node->weight);
    start = start_action(rsc, node, TRUE);

    pcmk__order_vs_unfence(rsc, node, start, pe_order_implies_then);

    if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
        pe__clear_action_flags(start, pe_action_optional);
    }

    if (is_expected_node(rsc, node)) {
        /* This could be a problem if the start becomes necessary for other
         * reasons later.
         */
        pe_rsc_trace(rsc,
                     "Start of multiply active resource %s "
                     "on expected node %s will be a pseudo-action",
                     rsc->id, pe__node_name(node));
        pe__set_action_flags(start, pe_action_pseudo);
    }
}

/*!
 * \internal
 * \brief Schedule actions needed to promote a resource on a node
 *
 * \param[in,out] rsc       Resource being promoted
 * \param[in]     node      Node where resource should be promoted
 * \param[in]     optional  Whether actions should be optional
 */
static void
promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
{
    GList *iter = NULL;
    GList *action_list = NULL;
    bool runnable = true;

    CRM_ASSERT(node != NULL);

    // Any start must be runnable for promotion to be runnable
    action_list = pe__resource_actions(rsc, node, RSC_START, true);
    for (iter = action_list; iter != NULL; iter = iter->next) {
        pe_action_t *start = (pe_action_t *) iter->data;

        if (!pcmk_is_set(start->flags, pe_action_runnable)) {
            runnable = false;
        }
    }
    g_list_free(action_list);

    if (runnable) {
        pe_action_t *promote = promote_action(rsc, node, optional);

        pe_rsc_trace(rsc, "Scheduling %s promotion of %s on %s",
                     (optional? "optional" : "required"), rsc->id,
                     pe__node_name(node));

        if (is_expected_node(rsc, node)) {
            /* This could be a problem if the promote becomes necessary for
             * other reasons later.
             */
            pe_rsc_trace(rsc,
                         "Promotion of multiply active resource %s "
                         "on expected node %s will be a pseudo-action",
                         rsc->id, pe__node_name(node));
            pe__set_action_flags(promote, pe_action_pseudo);
        }
    } else {
        pe_rsc_trace(rsc, "Not promoting %s on %s: start unrunnable",
                     rsc->id, pe__node_name(node));
        action_list = pe__resource_actions(rsc, node, RSC_PROMOTE, true);
        for (iter = action_list; iter != NULL; iter = iter->next) {
            pe_action_t *promote = (pe_action_t *) iter->data;

            pe__clear_action_flags(promote, pe_action_runnable);
        }
        g_list_free(action_list);
    }
}

/*!
 * \internal
 * \brief Schedule actions needed to demote a resource wherever it is active
 *
 * \param[in,out] rsc       Resource being demoted
 * \param[in]     node      Node where resource should be demoted (ignored)
 * \param[in]     optional  Whether actions should be optional
 */
static void
demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
{
    /* Since this will only be called for a primitive (possibly as an instance
     * of a collective resource), the resource is multiply active if it is
     * running on more than one node, so we want to demote on all of them as
     * part of recovery, regardless of which one is the desired node.
     */
    for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
        pe_node_t *current = (pe_node_t *) iter->data;

        if (is_expected_node(rsc, current)) {
            pe_rsc_trace(rsc,
                         "Skipping demote of multiply active resource %s "
                         "on expected node %s",
                         rsc->id, pe__node_name(current));
        } else {
            pe_rsc_trace(rsc, "Scheduling %s demotion of %s on %s",
                         (optional? "optional" : "required"), rsc->id,
                         pe__node_name(current));
            demote_action(rsc, current, optional);
        }
    }
}

static void
assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional)
{
    CRM_ASSERT(false);
}

/*!
 * \internal
 * \brief Schedule deletion of a resource's history from a node
 *
 * \param[in,out] rsc       Resource whose history should be cleaned up
 * \param[in]     node      Node where history should be cleaned up
 * \param[in]     optional  Whether clean-up should be optional
 */
void
pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional)
{
    /* If the cleanup is required, its orderings are optional, because they're
     * relevant only if both actions are required. Conversely, if the cleanup is
     * optional, the orderings make the then action required if the first action
     * becomes required.
     */
    uint32_t flag = optional? pe_order_implies_then : pe_order_optional;

    CRM_CHECK((rsc != NULL) && (node != NULL), return);

    if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
        pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: resource failed",
                     rsc->id, pe__node_name(node));
        return;
    }

    if (node->details->unclean || !node->details->online) {
        pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: node unavailable",
                     rsc->id, pe__node_name(node));
        return;
    }

    crm_notice("Scheduling clean-up of %s on %s", rsc->id, pe__node_name(node));
    delete_action(rsc, node, optional);

    // stop -> clean-up -> start
    pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_DELETE, flag);
    pcmk__order_resource_actions(rsc, RSC_DELETE, rsc, RSC_START, flag);
}
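
/* Concretely: when the clean-up is required, the stop -> delete -> start
 * orderings above use pe_order_optional, since they matter only if both ends
 * are already required; when the clean-up is optional, pe_order_implies_then
 * makes the delete (and the start after it) required whenever the stop turns
 * out to be required.
 */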

/*!
 * \internal
 * \brief Add primitive meta-attributes relevant to graph actions to XML
 *
 * \param[in]     rsc  Primitive resource whose meta-attributes should be added
 * \param[in,out] xml  Transition graph action attributes XML to add to
 */
void
pcmk__primitive_add_graph_meta(pe_resource_t *rsc, xmlNode *xml)
{
    char *name = NULL;
    char *value = NULL;
    const pe_resource_t *parent = NULL;

    CRM_ASSERT((rsc != NULL) && (xml != NULL));

    /* Clone instance numbers get set internally as meta-attributes, and are
     * needed in the transition graph (for example, to tell unique clone
     * instances apart).
     */
    value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
    if (value != NULL) {
        name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
        crm_xml_add(xml, name, value);
        free(name);
    }

    // Not sure if this one is really needed ...
    value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
    if (value != NULL) {
        name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
        crm_xml_add(xml, name, value);
        free(name);
    }

    /* The container meta-attribute can be set on the primitive itself or one of
     * its parents (for example, a group inside a container resource), so check
     * them all, and keep the highest one found.
     */
    for (parent = rsc; parent != NULL; parent = parent->parent) {
        if (parent->container != NULL) {
            crm_xml_add(xml, CRM_META "_" XML_RSC_ATTR_CONTAINER,
                        parent->container->id);
        }
    }

    /* Bundle replica children will get their external-ip set internally as a
     * meta-attribute. The graph action needs it, but under a different naming
     * convention than other meta-attributes.
     */
    value = g_hash_table_lookup(rsc->meta, "external-ip");
    if (value != NULL) {
        crm_xml_add(xml, "pcmk_external_ip", value);
    }
}
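
/* As an illustration (attribute names derived from crm_meta_name() and the
 * constants above; resource names hypothetical): for instance 2 of a unique
 * clone running inside container httpd-bundle-0, the graph action would carry
 * meta-attributes along the lines of
 *
 *     CRM_meta_clone="2"
 *     CRM_meta_container="httpd-bundle-0"
 *
 * plus pcmk_external_ip="..." if a bundle replica set external-ip.
 */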

// Primitive implementation of resource_alloc_functions_t:add_utilization()
void
pcmk__primitive_add_utilization(const pe_resource_t *rsc,
                                const pe_resource_t *orig_rsc, GList *all_rscs,
                                GHashTable *utilization)
{
    if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
        return;
    }

    pe_rsc_trace(orig_rsc, "%s: Adding primitive %s as colocated utilization",
                 orig_rsc->id, rsc->id);
    pcmk__release_node_capacity(utilization, rsc);
}

/*!
 * \internal
 * \brief Get a node's shutdown time
 *
 * \param[in] node  Node to check
 *
 * \return Value of node's shutdown attribute if set, otherwise the effective
 *         current time
 */
static time_t
shutdown_time(const pe_node_t *node)
{
    const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
    time_t result = 0;

    if (shutdown != NULL) {
        long long result_ll;

        if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) {
            result = (time_t) result_ll;
        }
    }
    return (result == 0)? get_effective_time(node->details->data_set) : result;
}

/*!
 * \internal
 * \brief Ban a resource from a node if it is not the resource's lock node
 *
 * \param[in]     data       Node to check
 * \param[in,out] user_data  Shutdown-locked resource
 */
static void
ban_if_not_locked(gpointer data, gpointer user_data)
{
    pe_node_t *node = (pe_node_t *) data;
    pe_resource_t *rsc = (pe_resource_t *) user_data;

    if (strcmp(node->details->uname, rsc->lock_node->details->uname) != 0) {
        resource_location(rsc, node, -CRM_SCORE_INFINITY,
                          XML_CONFIG_ATTR_SHUTDOWN_LOCK, rsc->cluster);
    }
}

// Primitive implementation of resource_alloc_functions_t:shutdown_lock()
void
pcmk__primitive_shutdown_lock(pe_resource_t *rsc)
{
    const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);

    // Fence devices and remote connections can't be locked
    if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
        || pe__resource_is_remote_conn(rsc, rsc->cluster)) {
        return;
    }

    if (rsc->lock_node != NULL) {
        // The lock was obtained from resource history

        if (rsc->running_on != NULL) {
            /* The resource was started elsewhere even though it is now
             * considered locked. This shouldn't be possible, but as a
             * failsafe, we don't want to disturb the resource now.
             */
            pe_rsc_info(rsc,
                        "Cancelling shutdown lock because %s is already active",
                        rsc->id);
            pe__clear_resource_history(rsc, rsc->lock_node, rsc->cluster);
            rsc->lock_node = NULL;
            rsc->lock_time = 0;
        }

    // Only a resource active on exactly one node can be locked
    } else if (pcmk__list_of_1(rsc->running_on)) {
        pe_node_t *node = rsc->running_on->data;

        if (node->details->shutdown) {
            if (node->details->unclean) {
                pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown",
                             rsc->id, pe__node_name(node));
            } else {
                rsc->lock_node = node;
                rsc->lock_time = shutdown_time(node);
            }
        }
    }

    if (rsc->lock_node == NULL) {
        // No lock needed
        return;
    }

    if (rsc->cluster->shutdown_lock > 0) {
        time_t lock_expiration = rsc->lock_time + rsc->cluster->shutdown_lock;

        pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
                    rsc->id, pe__node_name(rsc->lock_node),
                    (long long) lock_expiration);
        pe__update_recheck_time(++lock_expiration, rsc->cluster);
    } else {
        pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
                    rsc->id, pe__node_name(rsc->lock_node));
    }

    // If resource is locked to one node, ban it from all other nodes
    g_list_foreach(rsc->cluster->nodes, ban_if_not_locked, rsc);
}
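
/* Worked example (hypothetical values): if the shutdown-lock-limit option puts
 * rsc->cluster->shutdown_lock at 600 seconds and the locked node's shutdown
 * attribute gives rsc->lock_time = 1000, the lock expires at
 * 1000 + 600 = 1600; pe__update_recheck_time(1601, ...) then schedules a
 * scheduler re-run just after expiration, while ban_if_not_locked() keeps the
 * resource off every other node in the meantime.
 */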