// SPDX-License-Identifier: GPL-2.0
/*
 * memory tiering: migrate cold pages in node 0 and hot pages in node 1 to node
 * 1 and node 0, respectively.  Adjust the hotness/coldness thresholds aiming
 * at a resulting 99.7% node 0 memory utilization ratio.
 */
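
/*
 * Illustrative usage (a sketch, not part of the sample logic): when this
 * sample is built into the kernel, it can be enabled at boot via the kernel
 * command line, or at runtime via the parameters file, e.g.:
 *
 *	damon_sample_mtier.enabled=Y			(kernel command line)
 *	echo Y > /sys/module/damon_sample_mtier/parameters/enabled
 *
 * The node address and threshold parameters below can be set the same way.
 */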

#define pr_fmt(fmt) "damon_sample_mtier: " fmt

#include <linux/damon.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "damon_sample_mtier."

static unsigned long node0_start_addr __read_mostly;
module_param(node0_start_addr, ulong, 0600);

static unsigned long node0_end_addr __read_mostly;
module_param(node0_end_addr, ulong, 0600);

static unsigned long node1_start_addr __read_mostly;
module_param(node1_start_addr, ulong, 0600);

static unsigned long node1_end_addr __read_mostly;
module_param(node1_end_addr, ulong, 0600);

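/*
 * Quota auto-tuning goal values in basis points (bp, 1/10,000): 9,970 bp
 * means 99.7% of node 0 memory used, and 50 bp means 0.5% of node 0 memory
 * kept free.
 */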
static unsigned long node0_mem_used_bp __read_mostly = 9970;
module_param(node0_mem_used_bp, ulong, 0600);

static unsigned long node0_mem_free_bp __read_mostly = 50;
module_param(node0_mem_free_bp, ulong, 0600);

static int damon_sample_mtier_enable_store(
		const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops enabled_param_ops = {
	.set = damon_sample_mtier_enable_store,
	.get = param_get_bool,
};

static bool enabled __read_mostly;
module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
MODULE_PARM_DESC(enabled, "Enable or disable DAMON_SAMPLE_MTIER");

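/*
 * If set, find each NUMA node's physical address range at start time via
 * nid_to_phys() below, instead of using the node*_{start,end}_addr
 * parameters above.
 */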
static bool detect_node_addresses __read_mostly;
module_param(detect_node_addresses, bool, 0600);

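/* ctxs[0]: promotion (monitors node 1), ctxs[1]: demotion (monitors node 0) */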
static struct damon_ctx *ctxs[2];

struct region_range {
	phys_addr_t start;
	phys_addr_t end;
};

static int nid_to_phys(int target_node, struct region_range *range)
{
	if (!node_online(target_node)) {
		pr_err("NUMA node %d is not online\n", target_node);
		return -EINVAL;
	}

	range->start = PFN_PHYS(node_start_pfn(target_node));
	range->end = PFN_PHYS(node_end_pfn(target_node));

	return 0;
}

static struct damon_ctx *damon_sample_mtier_build_ctx(bool promote)
{
	struct damon_ctx *ctx;
	struct damon_attrs attrs;
	struct damon_target *target;
	struct damon_region *region;
	struct damos *scheme;
	struct damos_quota_goal *quota_goal;
	struct damos_filter *filter;
	struct region_range addr;
	int ret;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;
	attrs = (struct damon_attrs) {
		.sample_interval = 5 * USEC_PER_MSEC,
		.aggr_interval = 100 * USEC_PER_MSEC,
		.ops_update_interval = 60 * USEC_PER_MSEC * MSEC_PER_SEC,
		.min_nr_regions = 10,
		.max_nr_regions = 1000,
	};

	/*
	 * auto-tune sampling and aggregation intervals aiming at a 4%
	 * DAMON-observed access ratio, keeping the sampling interval in the
	 * [5ms, 10s] range.
	 */
	attrs.intervals_goal = (struct damon_intervals_goal) {
		.access_bp = 400, .aggrs = 3,
		.min_sample_us = 5000, .max_sample_us = 10000000,
	};
	if (damon_set_attrs(ctx, &attrs))
		goto free_out;
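	/* use the physical address space monitoring operations set */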
	if (damon_select_ops(ctx, DAMON_OPS_PADDR))
		goto free_out;

	target = damon_new_target();
	if (!target)
		goto free_out;
	damon_add_target(ctx, target);

	if (detect_node_addresses) {
		ret = promote ? nid_to_phys(1, &addr) : nid_to_phys(0, &addr);
		if (ret)
			goto free_out;
	} else {
		addr.start = promote ? node1_start_addr : node0_start_addr;
		addr.end = promote ? node1_end_addr : node0_end_addr;
	}

	region = damon_new_region(addr.start, addr.end);
	if (!region)
		goto free_out;
	damon_add_region(region, target);

	scheme = damon_new_scheme(
			/* access pattern */
			&(struct damos_access_pattern) {
				.min_sz_region = PAGE_SIZE,
				.max_sz_region = ULONG_MAX,
				.min_nr_accesses = promote ? 1 : 0,
				.max_nr_accesses = promote ? UINT_MAX : 0,
				.min_age_region = 0,
				.max_age_region = UINT_MAX},
			/* action */
			promote ? DAMOS_MIGRATE_HOT : DAMOS_MIGRATE_COLD,
			1000000,	/* apply interval (1s) */
			&(struct damos_quota){
				/* 200 MiB per sec at most */
				.reset_interval = 1000,
				.sz = 200 * 1024 * 1024,
				/* ignore size of region when prioritizing */
				.weight_sz = 0,
				.weight_nr_accesses = 100,
				.weight_age = 100,
			},
			&(struct damos_watermarks){},
			promote ? 0 : 1);	/* migrate target node id */
	if (!scheme)
		goto free_out;
	damon_set_schemes(ctx, &scheme, 1);
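	/*
	 * Let DAMOS auto-tune the quota: grow the promotion quota until node 0
	 * is 99.7% (node0_mem_used_bp) used, and the demotion quota until
	 * node 0 has 0.5% (node0_mem_free_bp) of its memory free.
	 */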
	quota_goal = damos_new_quota_goal(
			promote ? DAMOS_QUOTA_NODE_MEM_USED_BP :
			DAMOS_QUOTA_NODE_MEM_FREE_BP,
			promote ? node0_mem_used_bp : node0_mem_free_bp);
	if (!quota_goal)
		goto free_out;
	quota_goal->nid = 0;
	damos_add_quota_goal(&scheme->quota, quota_goal);
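	/*
	 * Cross-check with the page table Accessed bits: let the promotion
	 * scheme act only on young pages, and the demotion scheme only on
	 * pages that are not young.
	 */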
	filter = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true, promote);
	if (!filter)
		goto free_out;
	damos_add_filter(scheme, filter);
	return ctx;
free_out:
	damon_destroy_ctx(ctx);
	return NULL;
}

static int damon_sample_mtier_start(void)
{
	struct damon_ctx *ctx;

	ctx = damon_sample_mtier_build_ctx(true);
	if (!ctx)
		return -ENOMEM;
	ctxs[0] = ctx;
	ctx = damon_sample_mtier_build_ctx(false);
	if (!ctx) {
		damon_destroy_ctx(ctxs[0]);
		return -ENOMEM;
	}
	ctxs[1] = ctx;
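	/* start the two contexts, requesting exclusive use of DAMON */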
	return damon_start(ctxs, 2, true);
}

static void damon_sample_mtier_stop(void)
{
	damon_stop(ctxs, 2);
	damon_destroy_ctx(ctxs[0]);
	damon_destroy_ctx(ctxs[1]);
}

static bool init_called;

static int damon_sample_mtier_enable_store(
		const char *val, const struct kernel_param *kp)
{
	bool is_enabled = enabled;
	int err;

	err = kstrtobool(val, &enabled);
	if (err)
		return err;

	if (enabled == is_enabled)
		return 0;

	/* Called before the init function; let the init function handle it. */
	if (!init_called)
		return 0;

	if (enabled) {
		err = damon_sample_mtier_start();
		if (err)
			enabled = false;
		return err;
	}
	damon_sample_mtier_stop();
	return 0;
}

static int __init damon_sample_mtier_init(void)
{
	int err = 0;

	init_called = true;
	if (enabled) {
		err = damon_sample_mtier_start();
		if (err)
			enabled = false;
	}
	return err;
}

module_init(damon_sample_mtier_init);