// SPDX-License-Identifier: GPL-2.0
/*
 * Common Primitives for Data Access Monitoring
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "ops-common.h"

/*
 * Get an online page for a pfn if it's in the LRU list. Otherwise, returns
 * NULL.
 *
 * The body of this function is stolen from the 'page_idle_get_folio()'. We
 * steal rather than reuse it because the code is quite simple.
 *
 * On success the caller holds a reference on the returned folio and must
 * drop it with folio_put().
 */
struct folio *damon_get_folio(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);
	struct folio *folio;

	if (!page)
		return NULL;

	folio = page_folio(page);
	/* Lockless peek; folio_try_get() can still fail if refcount is 0. */
	if (!folio_test_lru(folio) || !folio_try_get(folio))
		return NULL;
	/*
	 * Re-check after taking the reference: between the checks above and
	 * folio_try_get(), the page could have been freed and reused under a
	 * different folio, or removed from the LRU.  Back out in that case.
	 */
	if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
		folio_put(folio);
		folio = NULL;
	}
	return folio;
}

/*
 * Clear the accessed ("young") state for the page mapped by @pte at @addr in
 * @vma, recording the prior state in the folio's page_idle young/idle flags
 * so that a later access check can tell whether the page was referenced.
 *
 * NOTE(review): for the non-present case this assumes the caller only passes
 * PFN swap PTEs (e.g. device-exclusive entries), since swp_offset_pfn() is
 * used unconditionally — confirm against callers.
 */
void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr)
{
	pte_t pteval = ptep_get(pte);
	struct folio *folio;
	bool young = false;
	unsigned long pfn;

	if (likely(pte_present(pteval)))
		pfn = pte_pfn(pteval);
	else
		pfn = swp_offset_pfn(pte_to_swp_entry(pteval));

	folio = damon_get_folio(pfn);
	if (!folio)
		return;

	/*
	 * PFN swap PTEs, such as device-exclusive ones, that actually map pages
	 * are "old" from a CPU perspective. The MMU notifier takes care of any
	 * device aspects.
	 */
	if (likely(pte_present(pteval)))
		young |= ptep_test_and_clear_young(vma, addr, pte);
	young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
	if (young)
		folio_set_young(folio);

	/* Mark idle now; a subsequent access will clear it (page_idle). */
	folio_set_idle(folio);
	folio_put(folio);
}

/*
 * PMD (transparent huge page) counterpart of damon_ptep_mkold(): clear the
 * accessed bit of the huge mapping at @addr and record the prior state in
 * the folio's young/idle flags.  No-op without CONFIG_TRANSPARENT_HUGEPAGE.
 */
void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct folio *folio = damon_get_folio(pmd_pfn(pmdp_get(pmd)));

	if (!folio)
		return;

	/* Clears the young bit and notifies secondary MMUs in one call. */
	if (pmdp_clear_young_notify(vma, addr, pmd))
		folio_set_young(folio);

	folio_set_idle(folio);
	folio_put(folio);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}

/* Upper bound of each sub-score (access frequency, age), i.e. 100%. */
#define DAMON_MAX_SUBSCORE	(100)
/* Cap for log2 of a region's age in seconds. */
#define DAMON_MAX_AGE_IN_LOG	(32)

/*
 * Compute the hotness score of region @r under scheme @s in context @c.
 *
 * The score is a weighted average of two sub-scores, each scaled to
 * [0, DAMON_MAX_SUBSCORE]:
 *  - frequency: nr_accesses relative to the maximum possible accesses, and
 *  - age: log2 of the region's age in seconds, where an old region with
 *    zero accesses counts as colder (negative) rather than hotter.
 * The weights come from the scheme's quota (weight_nr_accesses, weight_age).
 *
 * Returns a value in [0, DAMOS_MAX_SCORE]; higher means hotter.
 */
int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
			struct damos *s)
{
	int freq_subscore;
	unsigned int age_in_sec;
	int age_in_log, age_subscore;
	unsigned int freq_weight = s->quota.weight_nr_accesses;
	unsigned int age_weight = s->quota.weight_age;
	int hotness;

	freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE /
		damon_max_nr_accesses(&c->attrs);

	/*
	 * Age is counted in aggregation intervals; the division by 1000000
	 * converts to seconds (assumes aggr_interval is in microseconds —
	 * consistent with DAMON's attrs definitions).
	 */
	age_in_sec = (unsigned long)r->age * c->attrs.aggr_interval / 1000000;
	/* age_in_log = floor(log2(age_in_sec)) + 1, capped at the max. */
	for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
			age_in_log++, age_in_sec >>= 1)
		;

	/* If frequency is 0, higher age means it's colder */
	if (freq_subscore == 0)
		age_in_log *= -1;

	/*
	 * Now age_in_log is in [-DAMON_MAX_AGE_IN_LOG, DAMON_MAX_AGE_IN_LOG].
	 * Scale it to be in [0, 100] and set it as age subscore.
	 */
	age_in_log += DAMON_MAX_AGE_IN_LOG;
	age_subscore = age_in_log * DAMON_MAX_SUBSCORE /
		DAMON_MAX_AGE_IN_LOG / 2;

	hotness = (freq_weight * freq_subscore + age_weight * age_subscore);
	/* Guard against division by zero when both weights are 0. */
	if (freq_weight + age_weight)
		hotness /= freq_weight + age_weight;
	/*
	 * Transform it to fit in [0, DAMOS_MAX_SCORE]
	 */
	hotness = hotness * DAMOS_MAX_SCORE / DAMON_MAX_SUBSCORE;

	return hotness;
}

/*
 * Compute the coldness score of region @r under scheme @s in context @c:
 * simply the complement of damon_hot_score() within [0, DAMOS_MAX_SCORE].
 */
int damon_cold_score(struct damon_ctx *c, struct damon_region *r,
			struct damos *s)
{
	int hotness = damon_hot_score(c, r, s);

	/* Return coldness of the region */
	return DAMOS_MAX_SCORE - hotness;
}