Lines matching refs: tmc (references to struct tmigr_cpu in kernel/time/timer_migration.c)
428 static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
430 return !(tmc->tmgroup && tmc->online);
526 struct tmigr_cpu *tmc)
528 struct tmigr_group *child = NULL, *group = tmc->tmgroup;
547 static void walk_groups(up_f up, struct tmigr_walk *data, struct tmigr_cpu *tmc)
549 lockdep_assert_held(&tmc->lock);
551 __walk_groups(up, data, tmc);
673 static void __tmigr_cpu_activate(struct tmigr_cpu *tmc)
677 data.childmask = tmc->groupmask;
679 trace_tmigr_cpu_active(tmc);
681 tmc->cpuevt.ignore = true;
682 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
684 walk_groups(&tmigr_active_up, &data, tmc);
694 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
696 if (tmigr_is_not_available(tmc))
699 if (WARN_ON_ONCE(!tmc->idle))
702 raw_spin_lock(&tmc->lock);
703 tmc->idle = false;
704 __tmigr_cpu_activate(tmc);
705 raw_spin_unlock(&tmc->lock);
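Lines 673-705 are the activation path: tmigr_cpu_activate() bails out when the CPU is not part of the hierarchy, warns if it was not idle, and under tmc->lock clears tmc->idle while __tmigr_cpu_activate() marks the CPU's own event as ignored, resets tmc->wakeup and walks tmigr_active_up with the CPU's groupmask. A much-simplified sketch of that upward propagation follows; it uses a plain, non-atomic active mask, whereas the kernel updates a packed group state atomically.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for a tmigr-like group: a mask of active children. */
struct grp {
    struct grp *parent;
    unsigned int active;      /* bit per active child */
    unsigned int groupmask;   /* this group's bit in its parent */
};

/*
 * Set @childmask in @group->active; the walk can stop as soon as a group
 * was already active, because every level above it is active as well.
 */
static bool activate_up(struct grp *group, unsigned int *childmask)
{
    bool was_active = group->active != 0;

    group->active |= *childmask;
    *childmask = group->groupmask;    /* bit to set at the next level */

    return was_active;
}

static void cpu_activate(struct grp *leaf, unsigned int cpumask_bit)
{
    unsigned int childmask = cpumask_bit;

    for (struct grp *g = leaf; g; g = g->parent) {
        if (activate_up(g, &childmask))
            break;
    }
}

int main(void)
{
    struct grp top = { 0 };
    struct grp child = { .parent = &top, .groupmask = 0x1 };

    cpu_activate(&child, 0x2);    /* this CPU owns bit 0x2 in its group */
    printf("child.active=%#x top.active=%#x\n", child.active, top.active);
    return 0;
}

Stopping at the first level that was already active keeps the common case cheap in this sketch: if some sibling is awake, the levels above it already know.
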
881 static u64 tmigr_new_timer(struct tmigr_cpu *tmc, u64 nextexp)
885 .evt = &tmc->cpuevt };
887 lockdep_assert_held(&tmc->lock);
889 if (tmc->remote)
892 trace_tmigr_cpu_new_timer(tmc);
894 tmc->cpuevt.ignore = false;
897 walk_groups(&tmigr_new_timer_up, &data, tmc);
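Lines 881-897 show tmigr_new_timer(): with tmc->lock held and tmc->remote not set, the ignore flag of the per-CPU event is cleared and tmigr_new_timer_up is walked so the hierarchy learns about the CPU's new first global timer. The sketch below reduces this to propagating a minimum expiry towards the root; the kernel instead queues real timerqueue events per group, and its walk termination also depends on the group's active state.

#include <stdint.h>
#include <stdio.h>

#define EXP_MAX UINT64_MAX    /* stand-in for KTIME_MAX */

struct grp {
    struct grp *parent;
    uint64_t next_expiry;     /* earliest global expiry below this group */
};

/*
 * Propagate a (possibly new) earliest expiry upwards. The walk can stop
 * once a level already has an earlier or equal event queued.
 */
static void new_timer_up(struct grp *leaf, uint64_t expires)
{
    for (struct grp *g = leaf; g; g = g->parent) {
        if (g->next_expiry <= expires)
            break;
        g->next_expiry = expires;
    }
}

int main(void)
{
    struct grp top = { .next_expiry = EXP_MAX };
    struct grp child = { .parent = &top, .next_expiry = EXP_MAX };

    new_timer_up(&child, 1000);
    new_timer_up(&child, 5000);    /* later event: stops at the leaf */
    printf("child=%llu top=%llu\n",
           (unsigned long long)child.next_expiry,
           (unsigned long long)top.next_expiry);
    return 0;
}
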
908 struct tmigr_cpu *tmc;
910 tmc = per_cpu_ptr(&tmigr_cpu, cpu);
912 raw_spin_lock_irq(&tmc->lock);
929 if (!tmc->online || tmc->remote || tmc->cpuevt.ignore ||
930 now < tmc->cpuevt.nextevt.expires) {
931 raw_spin_unlock_irq(&tmc->lock);
935 trace_tmigr_handle_remote_cpu(tmc);
937 tmc->remote = true;
938 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
941 raw_spin_unlock_irq(&tmc->lock);
949 * the top). During fetching the next timer interrupt, also tmc->lock
963 raw_spin_lock(&tmc->lock);
976 if (!tmc->online || !tmc->idle) {
987 data.evt = &tmc->cpuevt;
995 walk_groups(&tmigr_new_timer_up, &data, tmc);
998 tmc->remote = false;
999 raw_spin_unlock_irq(&tmc->lock);
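Lines 908-999 are tmigr_handle_remote_cpu(): the migrator locks the remote CPU's tmc, skips it when it is offline, already being handled, has its event ignored, or the event is not yet due; otherwise it sets tmc->remote, drops the lock while expiring the remote timer bases (the comment fragment at line 949 refers to this window), then relocks to requeue the CPU's next event and clear tmc->remote. A user-space sketch of that mark-busy/drop-lock/do-work/relock pattern, with a pthread mutex standing in for the raw spinlock and made-up fields:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-CPU state touched by the remote handler. */
struct cpu_state {
    pthread_mutex_t lock;
    bool online;
    bool remote;              /* set while another CPU handles our timers */
    unsigned long expires;
};

static void handle_remote_cpu(struct cpu_state *cs, unsigned long now)
{
    pthread_mutex_lock(&cs->lock);
    /* Nothing to do: offline, already being handled, or not yet due. */
    if (!cs->online || cs->remote || now < cs->expires) {
        pthread_mutex_unlock(&cs->lock);
        return;
    }
    cs->remote = true;
    pthread_mutex_unlock(&cs->lock);

    /*
     * Expire the remote CPU's timers without holding its lock; the
     * remote flag keeps other migrators away meanwhile.
     */
    printf("expiring timers of remote CPU (expires=%lu, now=%lu)\n",
           cs->expires, now);

    pthread_mutex_lock(&cs->lock);
    cs->expires = now + 100;    /* requeue the next (made-up) event */
    cs->remote = false;
    pthread_mutex_unlock(&cs->lock);
}

int main(void)
{
    struct cpu_state cs = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .online = true,
        .expires = 50,
    };

    handle_remote_cpu(&cs, 60);
    printf("next expiry now %lu, remote=%d\n", cs.expires, cs.remote);
    return 0;
}
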
1060 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1063 if (tmigr_is_not_available(tmc))
1066 data.childmask = tmc->groupmask;
1074 if (!tmigr_check_migrator(tmc->tmgroup, tmc->groupmask)) {
1080 if (READ_ONCE(tmc->wakeup) == KTIME_MAX)
1087 * Update @tmc->wakeup only at the end and do not reset @tmc->wakeup to
1088 * KTIME_MAX. Even if tmc->lock is not held during the whole remote
1089 * handling, tmc->wakeup is fine to be stale as it is called in
1094 __walk_groups(&tmigr_handle_remote_up, &data, tmc);
1096 raw_spin_lock_irq(&tmc->lock);
1097 WRITE_ONCE(tmc->wakeup, data.firstexp);
1098 raw_spin_unlock_irq(&tmc->lock);
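Lines 1060-1098 are tmigr_handle_remote(): the CPU only walks the hierarchy when tmigr_check_migrator() says it is responsible for its bottom-level group (or when a remote expiry was handed to it through tmc->wakeup), and afterwards it stores the earliest remaining expiry in tmc->wakeup under tmc->lock. A tiny sketch of the kind of migrator test that check is presumably built on; struct grp and MIGR_NONE are stand-ins, and the kernel keeps this information in an atomically updated group state instead:

#include <stdbool.h>
#include <stdio.h>

#define MIGR_NONE 0xffu       /* stand-in for "no migrator assigned" */

struct grp {
    unsigned int migrator;    /* groupmask bit of the migrating child */
};

/*
 * A child must take care of remote expiries when it is the designated
 * migrator, or when nobody is (all children idle) and timers may be
 * left behind.
 */
static bool check_migrator(const struct grp *g, unsigned int childmask)
{
    return g->migrator == childmask || g->migrator == MIGR_NONE;
}

int main(void)
{
    struct grp g = { .migrator = 0x1 };

    printf("child 0x1 handles remote timers: %d\n", check_migrator(&g, 0x1));
    printf("child 0x2 handles remote timers: %d\n", check_migrator(&g, 0x2));

    g.migrator = MIGR_NONE;    /* the whole group went idle */
    printf("child 0x2, group idle:          %d\n", check_migrator(&g, 0x2));
    return 0;
}
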
1158 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1163 if (tmigr_is_not_available(tmc))
1167 data.childmask = tmc->groupmask;
1169 data.tmc_active = !tmc->idle;
1176 * Check is done lockless as interrupts are disabled and @tmc->idle is
1179 if (!tmc->idle) {
1180 __walk_groups(&tmigr_requires_handle_remote_up, &data, tmc);
1186 * When the CPU is idle, compare @tmc->wakeup with @data.now. The lock
1192 if (data.now >= READ_ONCE(tmc->wakeup))
1195 raw_spin_lock(&tmc->lock);
1196 if (data.now >= tmc->wakeup)
1198 raw_spin_unlock(&tmc->lock);
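Lines 1158-1198 are tmigr_requires_handle_remote(): an active CPU walks the hierarchy locklessly (interrupts are disabled and tmc->idle cannot change underneath), while an idle CPU only compares data.now against tmc->wakeup. The two variants visible in the fragments, READ_ONCE at line 1192 and the locked compare at lines 1195-1198, are presumably the two ways to read the 64-bit wakeup value without tearing, depending on the architecture. A user-space sketch of that idea; HAVE_ATOMIC_64BIT_LOAD and struct cpu_state are invented for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assume 64-bit loads do not tear on this platform; flip to 0 to model 32-bit. */
#define HAVE_ATOMIC_64BIT_LOAD 1

struct cpu_state {
    pthread_mutex_t lock;
    uint64_t wakeup;    /* next point in time remote handling is due */
};

/* Report whether remote timer handling is due at @now. */
static bool wakeup_due(struct cpu_state *cs, uint64_t now)
{
    bool ret;

    if (HAVE_ATOMIC_64BIT_LOAD) {
        /* A single load cannot tear; no lock needed for the compare. */
        ret = now >= cs->wakeup;
    } else {
        /* Two half-word loads could tear; serialize against writers. */
        pthread_mutex_lock(&cs->lock);
        ret = now >= cs->wakeup;
        pthread_mutex_unlock(&cs->lock);
    }
    return ret;
}

int main(void)
{
    struct cpu_state cs = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .wakeup = 1000,
    };

    printf("now=500:  due=%d\n", wakeup_due(&cs, 500));     /* 0 */
    printf("now=1500: due=%d\n", wakeup_due(&cs, 1500));    /* 1 */
    return 0;
}
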
1205 * tmigr_cpu_new_timer() - enqueue next global timer into hierarchy (idle tmc)
1210 * and thereby the timer idle path is executed once more. @tmc->wakeup
1219 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1222 if (tmigr_is_not_available(tmc))
1225 raw_spin_lock(&tmc->lock);
1227 ret = READ_ONCE(tmc->wakeup);
1229 if (nextexp != tmc->cpuevt.nextevt.expires ||
1230 tmc->cpuevt.ignore) {
1231 ret = tmigr_new_timer(tmc, nextexp);
1236 WRITE_ONCE(tmc->wakeup, ret);
1239 trace_tmigr_cpu_new_timer_idle(tmc, nextexp);
1240 raw_spin_unlock(&tmc->lock);
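Lines 1205-1240 are tmigr_cpu_new_timer(), the idle-path hook for a changed first global timer: under tmc->lock it starts from the current tmc->wakeup and only calls tmigr_new_timer() again when the new expiry differs from the queued event or the event carries the ignore flag; the result is written back with WRITE_ONCE(tmc->wakeup, ret). A sketch of that skip-the-walk-when-nothing-changed guard, with simplified stand-in state:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXP_MAX UINT64_MAX    /* stand-in for KTIME_MAX */

struct cpu_event {
    uint64_t expires;
    bool ignore;              /* event not queued in the hierarchy */
};

struct cpu_state {
    struct cpu_event evt;
    uint64_t wakeup;
    unsigned int walks;       /* how often the hierarchy walk ran */
};

/* Pretend to requeue the event in the hierarchy and return the new wakeup. */
static uint64_t requeue_in_hierarchy(struct cpu_state *cs, uint64_t nextexp)
{
    cs->evt.expires = nextexp;
    cs->evt.ignore = false;
    cs->walks++;
    return nextexp;    /* simplified: this CPU wakes for its own event */
}

static uint64_t cpu_new_timer(struct cpu_state *cs, uint64_t nextexp)
{
    uint64_t ret = cs->wakeup;

    /* Only do the (expensive) walk when the queued state is stale. */
    if (nextexp != cs->evt.expires || cs->evt.ignore)
        ret = requeue_in_hierarchy(cs, nextexp);

    cs->wakeup = ret;
    return ret;
}

int main(void)
{
    struct cpu_state cs = {
        .evt = { .expires = EXP_MAX, .ignore = true },
        .wakeup = EXP_MAX,
    };

    cpu_new_timer(&cs, 2000);
    cpu_new_timer(&cs, 2000);    /* unchanged: no second walk */
    printf("wakeup=%llu walks=%u\n",
           (unsigned long long)cs.wakeup, cs.walks);
    return 0;
}
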
1321 static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp)
1325 .evt = &tmc->cpuevt,
1326 .childmask = tmc->groupmask };
1334 tmc->cpuevt.ignore = false;
1336 walk_groups(&tmigr_inactive_up, &data, tmc);
1352 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1355 if (tmigr_is_not_available(tmc))
1358 raw_spin_lock(&tmc->lock);
1360 ret = __tmigr_cpu_deactivate(tmc, nextexp);
1362 tmc->idle = true;
1368 WRITE_ONCE(tmc->wakeup, ret);
1370 trace_tmigr_cpu_idle(tmc, nextexp);
1371 raw_spin_unlock(&tmc->lock);
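Lines 1321-1371 are the idle path: __tmigr_cpu_deactivate() clears the ignore flag on the CPU event, walks tmigr_inactive_up with the CPU's groupmask, and returns the first expiry the CPU has to wake up for in case it ends up as the last one going inactive; tmigr_cpu_deactivate() sets tmc->idle and stores the result in tmc->wakeup. The sketch below mirrors the activation sketch above: clear the child's bit on the way up, stop at the first level that still has an active child, otherwise collect the earliest remaining expiry. The types and the single next_expiry field per group are simplifications:

#include <stdint.h>
#include <stdio.h>

#define EXP_MAX UINT64_MAX    /* stand-in for KTIME_MAX */

struct grp {
    struct grp *parent;
    unsigned int active;      /* bit per active child */
    unsigned int groupmask;   /* this group's bit in its parent */
    uint64_t next_expiry;     /* earliest global event below this group */
};

/*
 * Clear @childmask on the way up. The walk stops at the first level that
 * still has another active child; if the top is reached with nothing
 * active, the caller must wake up for the earliest remaining event.
 */
static uint64_t cpu_deactivate(struct grp *leaf, unsigned int cpumask_bit)
{
    unsigned int childmask = cpumask_bit;
    uint64_t firstexp = EXP_MAX;

    for (struct grp *g = leaf; g; g = g->parent) {
        g->active &= ~childmask;
        if (g->active)
            return EXP_MAX;    /* someone else keeps migrating */
        if (g->next_expiry < firstexp)
            firstexp = g->next_expiry;
        childmask = g->groupmask;
    }
    return firstexp;    /* last one out: wake up for this expiry */
}

int main(void)
{
    struct grp top = { .active = 0x1, .next_expiry = 3000 };
    struct grp child = { .parent = &top, .active = 0x2,
                         .groupmask = 0x1, .next_expiry = 5000 };

    printf("wakeup=%llu\n",
           (unsigned long long)cpu_deactivate(&child, 0x2));
    return 0;
}
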
1395 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1396 struct tmigr_group *group = tmc->tmgroup;
1398 if (tmigr_is_not_available(tmc))
1401 if (WARN_ON_ONCE(tmc->idle))
1404 if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->groupmask))
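Lines 1395-1404 are tmigr_quick_check(), used on the idle path: it refuses early when the CPU is not available or is unexpectedly already marked idle, and otherwise asks tmigr_check_migrator_and_lonely() whether this CPU is (or may become) the migrator and is the only active child of its group. A sketch extending the migrator test above with such a "lonely" condition; struct grp, MIGR_NONE and the popcount helper are stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define MIGR_NONE 0xffu       /* stand-in for "no migrator assigned" */

struct grp {
    unsigned int migrator;    /* groupmask bit of the migrating child */
    unsigned int active;      /* bits of all active children */
};

static unsigned int popcount(unsigned int x)
{
    unsigned int n = 0;

    for (; x; x &= x - 1)
        n++;
    return n;
}

/*
 * Quick idle-path check: only worth looking further when this child is
 * (or could become) the migrator and no other child is active.
 */
static bool check_migrator_and_lonely(const struct grp *g, unsigned int childmask)
{
    bool migrator = g->migrator == childmask || g->migrator == MIGR_NONE;
    bool lonely = popcount(g->active) <= 1;

    return migrator && lonely;
}

int main(void)
{
    struct grp g = { .migrator = 0x1, .active = 0x3 };

    printf("two active children: %d\n", check_migrator_and_lonely(&g, 0x1));
    g.active = 0x1;
    printf("only child active:   %d\n", check_migrator_and_lonely(&g, 0x1));
    return 0;
}
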
1433 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1435 WARN_ON_ONCE(!tmc->online || tmc->idle);
1442 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1446 raw_spin_lock_irq(&tmc->lock);
1447 tmc->online = false;
1448 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
1454 firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
1455 trace_tmigr_cpu_offline(tmc);
1456 raw_spin_unlock_irq(&tmc->lock);
1468 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1471 if (WARN_ON_ONCE(!tmc->tmgroup))
1474 raw_spin_lock_irq(&tmc->lock);
1475 trace_tmigr_cpu_online(tmc);
1476 tmc->idle = timer_base_is_idle();
1477 if (!tmc->idle)
1478 __tmigr_cpu_activate(tmc);
1479 tmc->online = true;
1480 raw_spin_unlock_irq(&tmc->lock);
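Lines 1433-1480 are the CPU-hotplug callbacks: the offline path clears tmc->online and resets tmc->wakeup under the lock before deactivating the hierarchy with KTIME_MAX (the WARN_ON_ONCE at line 1435 asserts that the CPU asked to take over is online and not idle), while the online path verifies the CPU was connected to a group, samples whether the timer base is idle, activates the hierarchy if it is not, and only then sets tmc->online. A sketch of that ordering with placeholder hierarchy walks; all names are stand-ins:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXP_MAX UINT64_MAX    /* stand-in for KTIME_MAX */

struct cpu_state {
    pthread_mutex_t lock;
    bool online;
    bool idle;
    uint64_t wakeup;
};

/* Placeholders for the real hierarchy walks; they only log here. */
static void hierarchy_activate(void)
{
    puts("activate: propagate active state up the hierarchy");
}

static uint64_t hierarchy_deactivate(void)
{
    puts("deactivate: withdraw active state, collect first expiry");
    return EXP_MAX;
}

/* Offline: become invisible to migrators first, then leave the hierarchy. */
static uint64_t cpu_offline(struct cpu_state *cs)
{
    uint64_t firstexp;

    pthread_mutex_lock(&cs->lock);
    cs->online = false;
    cs->wakeup = EXP_MAX;
    firstexp = hierarchy_deactivate();
    pthread_mutex_unlock(&cs->lock);

    return firstexp;    /* caller wakes another CPU if != EXP_MAX */
}

/* Online: join the hierarchy first, only then become visible as online. */
static void cpu_online(struct cpu_state *cs, bool timer_base_idle)
{
    pthread_mutex_lock(&cs->lock);
    cs->idle = timer_base_idle;
    if (!cs->idle)
        hierarchy_activate();
    cs->online = true;
    pthread_mutex_unlock(&cs->lock);
}

int main(void)
{
    struct cpu_state cs = { .lock = PTHREAD_MUTEX_INITIALIZER };

    cpu_online(&cs, false);
    printf("after online:  online=%d idle=%d\n", cs.online, cs.idle);
    cpu_offline(&cs);
    printf("after offline: online=%d\n", cs.online);
    return 0;
}
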
1690 * Update tmc -> group / child -> group connection
1693 struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
1697 tmc->tmgroup = group;
1698 tmc->groupmask = BIT(group->num_children++);
1702 trace_tmigr_connect_cpu_parent(tmc);
1763 struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
1767 if (tmc->tmgroup)
1770 raw_spin_lock_init(&tmc->lock);
1771 timerqueue_init(&tmc->cpuevt.nextevt);
1772 tmc->cpuevt.nextevt.expires = KTIME_MAX;
1773 tmc->cpuevt.ignore = true;
1774 tmc->cpuevt.cpu = cpu;
1775 tmc->remote = false;
1776 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
1782 if (tmc->groupmask == 0)
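Lines 1690-1702 and 1763-1782 cover setup: the connect step points tmc->tmgroup at the bottom-level group and hands the CPU the next free bit as tmc->groupmask, and the prepare callback initializes the per-CPU state exactly once (it returns early when tmc->tmgroup is already set) with KTIME_MAX defaults and the ignore flag, then fails if no groupmask was assigned. A sketch of such an idempotent prepare step with simplified stand-in types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXP_MAX UINT64_MAX    /* stand-in for KTIME_MAX */

struct grp {
    unsigned int num_children;
};

struct cpu_state {
    struct grp *tmgroup;      /* non-NULL once the CPU is connected */
    unsigned int groupmask;   /* this CPU's bit in its group */
    uint64_t expires;
    bool ignore;
    bool remote;
    uint64_t wakeup;
};

/* Connect a CPU to its bottom-level group and hand out the next free bit. */
static void connect_cpu(struct cpu_state *cs, struct grp *group)
{
    cs->tmgroup = group;
    cs->groupmask = 1u << group->num_children++;
}

/* Idempotent prepare: later online attempts must not reinitialize state. */
static int cpu_prepare(struct cpu_state *cs, struct grp *group)
{
    if (cs->tmgroup)
        return 0;    /* not the first online attempt */

    cs->expires = EXP_MAX;
    cs->ignore = true;
    cs->remote = false;
    cs->wakeup = EXP_MAX;

    connect_cpu(cs, group);
    if (cs->groupmask == 0)
        return -1;    /* never connected to the hierarchy */
    return 0;
}

int main(void)
{
    struct grp g = { 0 };
    struct cpu_state cpu0 = { 0 }, cpu1 = { 0 };

    cpu_prepare(&cpu0, &g);
    cpu_prepare(&cpu1, &g);
    cpu_prepare(&cpu0, &g);    /* second attempt: no new bit handed out */
    printf("cpu0 mask=%#x cpu1 mask=%#x children=%u\n",
           cpu0.groupmask, cpu1.groupmask, g.num_children);
    return 0;
}
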