1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Basic resctrl file system operations
4 *
5 * Copyright (C) 2018 Intel Corporation
6 *
7 * Authors:
8 * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
9 * Fenghua Yu <fenghua.yu@intel.com>
10 */
11 #include <fcntl.h>
12 #include <limits.h>
13
14 #include "resctrl.h"
15
16 int snc_unreliable;
17
/*
 * find_resctrl_mount - Locate a mounted resctrl FS via /proc/mounts
 * @buffer: Output buffer (at least 256 bytes) receiving the mount point,
 *	    may be NULL when only an existence check is wanted.
 *
 * Return: 0 on success (resctrl is mounted, @buffer filled when non-NULL),
 * -ENXIO if /proc/mounts cannot be opened, -ENOENT if resctrl is not
 * mounted anywhere.
 */
static int find_resctrl_mount(char *buffer)
{
	FILE *mounts;
	char line[256], *fs, *mntpoint;

	mounts = fopen("/proc/mounts", "r");
	if (!mounts) {
		ksft_perror("/proc/mounts");
		return -ENXIO;
	}
	/* Each line is "<device> <mountpoint> <fstype> <options> ..." */
	while (fgets(line, sizeof(line), mounts)) {
		fs = strtok(line, " \t");
		if (!fs)
			continue;
		mntpoint = strtok(NULL, " \t");
		if (!mntpoint)
			continue;
		fs = strtok(NULL, " \t");
		if (!fs)
			continue;
		if (strcmp(fs, "resctrl"))
			continue;

		fclose(mounts);
		if (buffer)
			/*
			 * snprintf always NUL-terminates; strncpy could
			 * leave the buffer unterminated for long paths.
			 */
			snprintf(buffer, 256, "%s", mntpoint);

		return 0;
	}

	fclose(mounts);

	return -ENOENT;
}
54
55 /*
56 * mount_resctrlfs - Mount resctrl FS at /sys/fs/resctrl
57 *
58 * Mounts resctrl FS. Fails if resctrl FS is already mounted to avoid
59 * pre-existing settings interfering with the test results.
60 *
61 * Return: 0 on success, < 0 on error.
62 */
mount_resctrlfs(void)63 int mount_resctrlfs(void)
64 {
65 int ret;
66
67 ret = find_resctrl_mount(NULL);
68 if (ret != -ENOENT)
69 return -1;
70
71 ksft_print_msg("Mounting resctrl to \"%s\"\n", RESCTRL_PATH);
72 ret = mount("resctrl", RESCTRL_PATH, "resctrl", 0, NULL);
73 if (ret)
74 ksft_perror("mount");
75
76 return ret;
77 }
78
/*
 * umount_resctrlfs - Unmount resctrl FS wherever it is mounted
 *
 * A missing resctrl mount is treated as success.
 *
 * Return: 0 on success, < 0 on error.
 */
int umount_resctrlfs(void)
{
	char mountpoint[256];
	int ret = find_resctrl_mount(mountpoint);

	/* Not mounted: nothing to do */
	if (ret == -ENOENT)
		return 0;
	if (ret)
		return ret;

	if (umount(mountpoint)) {
		ksft_perror("Unable to umount resctrl");
		return -1;
	}

	return 0;
}
98
/*
 * get_cache_level - Convert cache level from string to integer
 * @cache_type: Cache level as string ("L2" or "L3")
 *
 * Return: cache level as integer or -1 if @cache_type is invalid.
 */
static int get_cache_level(const char *cache_type)
{
	if (strcmp(cache_type, "L2") == 0)
		return 2;
	if (strcmp(cache_type, "L3") == 0)
		return 3;

	ksft_print_msg("Invalid cache level\n");
	return -1;
}
115
/*
 * get_resource_cache_level - Map a resctrl resource name to a cache level
 * @resource: Resource name ("MB", "L3", or "L2")
 *
 * Return: cache level as integer or -1 if @resource is invalid.
 */
static int get_resource_cache_level(const char *resource)
{
	/* The "MB" resource is backed by the L3 (LLC) cache */
	if (strcmp(resource, "MB") == 0)
		return 3;

	return get_cache_level(resource);
}
123
124 /*
125 * get_domain_id - Get resctrl domain ID for a specified CPU
126 * @resource: resource name
127 * @cpu_no: CPU number
128 * @domain_id: domain ID (cache ID; for MB, L3 cache ID)
129 *
130 * Return: >= 0 on success, < 0 on failure.
131 */
get_domain_id(const char * resource,int cpu_no,int * domain_id)132 int get_domain_id(const char *resource, int cpu_no, int *domain_id)
133 {
134 char phys_pkg_path[1024];
135 int cache_num;
136 FILE *fp;
137
138 cache_num = get_resource_cache_level(resource);
139 if (cache_num < 0)
140 return cache_num;
141
142 sprintf(phys_pkg_path, "%s%d/cache/index%d/id", PHYS_ID_PATH, cpu_no, cache_num);
143
144 fp = fopen(phys_pkg_path, "r");
145 if (!fp) {
146 ksft_perror("Failed to open cache id file");
147
148 return -1;
149 }
150 if (fscanf(fp, "%d", domain_id) <= 0) {
151 ksft_perror("Could not get domain ID");
152 fclose(fp);
153
154 return -1;
155 }
156 fclose(fp);
157
158 return 0;
159 }
160
/*
 * count_sys_bitmap_bits - Count set bits in a /sys CPU bitmap file
 * @name: Path of the bitmap file
 *
 * Only lower case hex digits contribute to the count, matching the format
 * the kernel uses for /sys bitmap files.
 *
 * Return: Number of set bits; 0 if the file cannot be opened.
 */
static unsigned int count_sys_bitmap_bits(char *name)
{
	FILE *fp = fopen(name, "r");
	unsigned int total = 0;
	int c;

	if (!fp)
		return 0;

	while ((c = fgetc(fp)) != EOF) {
		unsigned int nibble;

		/* Translate one lower case hex digit to its value */
		if (c >= '0' && c <= '9')
			nibble = c - '0';
		else if (c >= 'a' && c <= 'f')
			nibble = c - 'a' + 10;
		else
			continue;

		/* Accumulate this digit's population count */
		while (nibble) {
			total += nibble & 1;
			nibble >>= 1;
		}
	}
	fclose(fp);

	return total;
}
194
/*
 * cpus_offline_empty - Check that no CPUs are currently offline
 *
 * Return: true when /sys/devices/system/cpu/offline is empty (all CPUs
 * online), false otherwise or when the file cannot be read.
 */
static bool cpus_offline_empty(void)
{
	char offline_cpus_str[64];
	bool empty = false;
	FILE *fp;

	fp = fopen("/sys/devices/system/cpu/offline", "r");
	if (!fp) {
		ksft_perror("Could not open /sys/devices/system/cpu/offline");
		return false;
	}

	if (fscanf(fp, "%63s", offline_cpus_str) < 0) {
		/* EOF without an error means the file was empty */
		if (!errno)
			empty = true;
		else
			ksft_perror("Could not read /sys/devices/system/cpu/offline");
	}

	fclose(fp);

	return empty;
}
218
/*
 * Detect SNC by comparing #CPUs in node0 with #CPUs sharing LLC with CPU0.
 * If any CPUs are offline declare the detection as unreliable.
 *
 * Return: Number of SNC nodes per L3 cache (1 when SNC is disabled or
 * detection failed; in the failure cases snc_unreliable is also set).
 * The result is computed once and cached for subsequent calls.
 */
int snc_nodes_per_l3_cache(void)
{
	int node_cpus, cache_cpus;
	/* Cached result: 0 = not yet detected, >= 1 once detection ran */
	static int snc_mode;

	if (!snc_mode) {
		/* Default to "SNC disabled"; kept on any detection failure */
		snc_mode = 1;
		if (!cpus_offline_empty()) {
			ksft_print_msg("Runtime SNC detection unreliable due to offline CPUs.\n");
			ksft_print_msg("Setting SNC mode to disabled.\n");
			snc_unreliable = 1;
			return snc_mode;
		}
		node_cpus = count_sys_bitmap_bits("/sys/devices/system/node/node0/cpumap");
		cache_cpus = count_sys_bitmap_bits("/sys/devices/system/cpu/cpu0/cache/index3/shared_cpu_map");

		if (!node_cpus || !cache_cpus) {
			ksft_print_msg("Could not determine Sub-NUMA Cluster mode.\n");
			snc_unreliable = 1;
			return snc_mode;
		}
		/* With SNC enabled, one LLC spans several NUMA nodes */
		snc_mode = cache_cpus / node_cpus;

		/*
		 * On some platforms (e.g. Hygon),
		 * cache_cpus < node_cpus, the calculated snc_mode is 0.
		 *
		 * Set snc_mode = 1 to indicate that SNC mode is not
		 * supported on the platform.
		 */
		if (!snc_mode)
			snc_mode = 1;

		if (snc_mode > 1)
			ksft_print_msg("SNC-%d mode discovered.\n", snc_mode);
	}

	return snc_mode;
}
262
/*
 * get_cache_size - Get cache size for a specified CPU
 * @cpu_no: CPU number
 * @cache_type: Cache level L2/L3
 * @cache_size: pointer to cache_size
 *
 * Return: = 0 on success, < 0 on failure.
 */
int get_cache_size(int cpu_no, const char *cache_type, unsigned long *cache_size)
{
	char cache_path[1024], cache_str[64];
	int cache_num, len, idx;
	FILE *fp;

	cache_num = get_cache_level(cache_type);
	if (cache_num < 0)
		return cache_num;

	sprintf(cache_path, "/sys/bus/cpu/devices/cpu%d/cache/index%d/size",
		cpu_no, cache_num);
	fp = fopen(cache_path, "r");
	if (!fp) {
		ksft_perror("Failed to open cache size");

		return -1;
	}
	if (fscanf(fp, "%63s", cache_str) <= 0) {
		ksft_perror("Could not get cache_size");
		fclose(fp);

		return -1;
	}
	fclose(fp);

	/* Parse strings such as "32K" or "8M" into a byte count */
	*cache_size = 0;
	len = (int)strlen(cache_str);
	for (idx = 0; idx < len; idx++) {
		char ch = cache_str[idx];

		if (ch >= '0' && ch <= '9')
			*cache_size = *cache_size * 10 + (ch - '0');
		else if (ch == 'K')
			*cache_size *= 1024;
		else if (ch == 'M')
			*cache_size *= 1024 * 1024;
		else
			break;
	}

	/*
	 * The amount of cache represented by each bit in the masks
	 * in the schemata file is reduced by a factor equal to SNC
	 * nodes per L3 cache.
	 * E.g. on a SNC-2 system with a 100MB L3 cache a test that
	 * allocates memory from its local SNC node (default behavior
	 * without using libnuma) will only see 50 MB llc_occupancy
	 * with a fully populated L3 mask in the schemata file.
	 */
	if (cache_num == 3)
		*cache_size /= snc_nodes_per_l3_cache();

	return 0;
}
331
332 #define CORE_SIBLINGS_PATH "/sys/bus/cpu/devices/cpu"
333
/*
 * get_bit_mask - Get bit mask from given file
 * @filename: File containing the mask in hexadecimal
 * @mask: The bit mask returned as unsigned long
 *
 * Return: = 0 on success, < 0 on failure.
 */
static int get_bit_mask(const char *filename, unsigned long *mask)
{
	bool read_ok;
	FILE *fp;

	if (!filename || !mask)
		return -1;

	fp = fopen(filename, "r");
	if (!fp) {
		ksft_print_msg("Failed to open bit mask file '%s': %s\n",
			       filename, strerror(errno));
		return -1;
	}

	read_ok = fscanf(fp, "%lx", mask) > 0;
	if (!read_ok)
		ksft_print_msg("Could not read bit mask file '%s': %s\n",
			       filename, strerror(errno));
	fclose(fp);

	return read_ok ? 0 : -1;
}
366
367 /*
368 * resource_info_unsigned_get - Read an unsigned value from
369 * /sys/fs/resctrl/info/@resource/@filename
370 * @resource: Resource name that matches directory name in
371 * /sys/fs/resctrl/info
372 * @filename: File in /sys/fs/resctrl/info/@resource
373 * @val: Contains read value on success.
374 *
375 * Return: = 0 on success, < 0 on failure. On success the read
376 * value is saved into @val.
377 */
resource_info_unsigned_get(const char * resource,const char * filename,unsigned int * val)378 int resource_info_unsigned_get(const char *resource, const char *filename,
379 unsigned int *val)
380 {
381 char file_path[PATH_MAX];
382 FILE *fp;
383
384 snprintf(file_path, sizeof(file_path), "%s/%s/%s", INFO_PATH, resource,
385 filename);
386
387 fp = fopen(file_path, "r");
388 if (!fp) {
389 ksft_print_msg("Error opening %s: %m\n", file_path);
390 return -1;
391 }
392
393 if (fscanf(fp, "%u", val) <= 0) {
394 ksft_print_msg("Could not get contents of %s: %m\n", file_path);
395 fclose(fp);
396 return -1;
397 }
398
399 fclose(fp);
400 return 0;
401 }
402
/*
 * create_bit_mask - Create bit mask from start, len pair
 * @start: LSB of the mask
 * @len: Number of bits in the mask
 *
 * Return: Mask with @len consecutive bits set starting at bit @start.
 * @len == 0 yields an empty mask. @len spanning the full width of
 * unsigned long is handled explicitly, because shifting by a count
 * equal to or greater than the type width is undefined behavior in C.
 */
unsigned long create_bit_mask(unsigned int start, unsigned int len)
{
	unsigned long mask;

	if (!len)
		return 0;

	if (len >= CHAR_BIT * sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = (1UL << len) - 1UL;

	return mask << start;
}
412
/*
 * count_contiguous_bits - Returns the longest train of bits in a bit mask
 * @val: A bit mask
 * @start: The location of the least-significant bit of the longest train
 *
 * Return: The length of the contiguous bits in the longest train of bits
 */
unsigned int count_contiguous_bits(unsigned long val, unsigned int *start)
{
	unsigned long prev = 0;
	unsigned int len = 0;

	/*
	 * ANDing with a one-bit-shifted copy removes the lowest bit of
	 * every train; the iteration count until nothing remains equals
	 * the length of the longest train, and the surviving bit marks
	 * the top of that train.
	 */
	while (val) {
		prev = val;
		val &= val >> 1;
		len++;
	}

	if (start)
		*start = len ? ffsl(prev) - 1 : 0;

	return len;
}
440
441 /*
442 * get_full_cbm - Get full Cache Bit Mask (CBM)
443 * @cache_type: Cache type as "L2" or "L3"
444 * @mask: Full cache bit mask representing the maximal portion of cache
445 * available for allocation, returned as unsigned long.
446 *
447 * Return: = 0 on success, < 0 on failure.
448 */
get_full_cbm(const char * cache_type,unsigned long * mask)449 int get_full_cbm(const char *cache_type, unsigned long *mask)
450 {
451 char cbm_path[PATH_MAX];
452 int ret;
453
454 if (!cache_type)
455 return -1;
456
457 snprintf(cbm_path, sizeof(cbm_path), "%s/%s/cbm_mask",
458 INFO_PATH, cache_type);
459
460 ret = get_bit_mask(cbm_path, mask);
461 if (ret || !*mask)
462 return -1;
463
464 return 0;
465 }
466
467 /*
468 * get_shareable_mask - Get shareable mask from shareable_bits
469 * @cache_type: Cache type as "L2" or "L3"
470 * @shareable_mask: Shareable mask returned as unsigned long
471 *
472 * Return: = 0 on success, < 0 on failure.
473 */
get_shareable_mask(const char * cache_type,unsigned long * shareable_mask)474 static int get_shareable_mask(const char *cache_type, unsigned long *shareable_mask)
475 {
476 char mask_path[PATH_MAX];
477
478 if (!cache_type)
479 return -1;
480
481 snprintf(mask_path, sizeof(mask_path), "%s/%s/shareable_bits",
482 INFO_PATH, cache_type);
483
484 return get_bit_mask(mask_path, shareable_mask);
485 }
486
/*
 * get_mask_no_shareable - Get Cache Bit Mask (CBM) without shareable bits
 * @cache_type: Cache type as "L2" or "L3"
 * @mask: The largest exclusive portion of the cache out of the
 *	  full CBM, returned as unsigned long
 *
 * Parts of a cache may be shared with other devices such as GPU. This function
 * calculates the largest exclusive portion of the cache where no other devices
 * besides CPU have access to the cache portion.
 *
 * Return: = 0 on success, < 0 on failure.
 */
int get_mask_no_shareable(const char *cache_type, unsigned long *mask)
{
	unsigned long full_mask, shareable_mask;
	unsigned int start, len;

	if (get_full_cbm(cache_type, &full_mask) < 0 ||
	    get_shareable_mask(cache_type, &shareable_mask) < 0)
		return -1;

	/* Longest run of bits the CPU does not share with other agents */
	len = count_contiguous_bits(full_mask & ~shareable_mask, &start);
	if (!len)
		return -1;

	*mask = create_bit_mask(start, len);

	return 0;
}
517
/*
 * taskset_benchmark - Taskset PID (i.e. benchmark) to a specified cpu
 * @bm_pid: PID that should be binded
 * @cpu_no: CPU number at which the PID would be binded
 * @old_affinity: When not NULL, set to old CPU affinity
 *
 * Return: 0 on success, < 0 on error.
 */
int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity)
{
	cpu_set_t new_affinity;

	/* Optionally capture the current affinity so it can be restored */
	if (old_affinity) {
		CPU_ZERO(old_affinity);
		if (sched_getaffinity(bm_pid, sizeof(*old_affinity), old_affinity)) {
			ksft_perror("Unable to read CPU affinity");
			return -1;
		}
	}

	CPU_ZERO(&new_affinity);
	CPU_SET(cpu_no, &new_affinity);

	if (sched_setaffinity(bm_pid, sizeof(new_affinity), &new_affinity)) {
		ksft_perror("Unable to taskset benchmark");
		return -1;
	}

	return 0;
}
550
/*
 * taskset_restore - Taskset PID to the earlier CPU affinity
 * @bm_pid: PID that should be reset
 * @old_affinity: The old CPU affinity to restore
 *
 * Return: 0 on success, < 0 on error.
 */
int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity)
{
	int ret = sched_setaffinity(bm_pid, sizeof(*old_affinity), old_affinity);

	if (ret)
		ksft_perror("Unable to restore CPU affinity");

	return ret ? -1 : 0;
}
567
/*
 * create_grp - Create a group only if one doesn't exist
 * @grp_name: Name of the group
 * @grp: Full path and name of the group
 * @parent_grp: Full path and name of the parent group
 *
 * Creates a group @grp_name if it does not exist yet. If @grp_name is NULL,
 * it is interpreted as the root group which always results in success.
 *
 * Return: 0 on success, < 0 on error.
 */
static int create_grp(const char *grp_name, char *grp, const char *parent_grp)
{
	struct dirent *entry;
	bool exists = false;
	DIR *dp;

	/* NULL means the root group, which always exists */
	if (!grp_name)
		return 0;

	/* Scan the parent directory for an entry matching @grp_name */
	dp = opendir(parent_grp);
	if (!dp) {
		ksft_perror("Unable to open resctrl for group");
		return -1;
	}
	while (!exists && (entry = readdir(dp)) != NULL)
		exists = strcmp(entry->d_name, grp_name) == 0;
	closedir(dp);

	if (!exists && mkdir(grp, 0) == -1) {
		ksft_perror("Unable to create group");
		return -1;
	}

	return 0;
}
613
/*
 * write_pid_to_tasks - Append @pid to a resctrl tasks file
 * @tasks: Path of the tasks file
 * @pid: PID to write
 *
 * Return: 0 on success, < 0 on error.
 */
static int write_pid_to_tasks(char *tasks, pid_t pid)
{
	int ret = -1;
	FILE *fp;

	fp = fopen(tasks, "w");
	if (!fp) {
		ksft_perror("Failed to open tasks file");
		return -1;
	}
	if (fprintf(fp, "%d\n", (int)pid) >= 0)
		ret = 0;
	else
		ksft_print_msg("Failed to write pid to tasks file\n");
	fclose(fp);

	return ret;
}
634
635 /*
636 * write_bm_pid_to_resctrl - Write a PID (i.e. benchmark) to resctrl FS
637 * @bm_pid: PID that should be written
638 * @ctrlgrp: Name of the control monitor group (con_mon grp)
639 * @mongrp: Name of the monitor group (mon grp)
640 *
641 * If a con_mon grp is requested, create it and write pid to it, otherwise
642 * write pid to root con_mon grp.
643 * If a mon grp is requested, create it and write pid to it, otherwise
644 * pid is not written, this means that pid is in con_mon grp and hence
645 * should consult con_mon grp's mon_data directory for results.
646 *
647 * Return: 0 on success, < 0 on error.
648 */
write_bm_pid_to_resctrl(pid_t bm_pid,const char * ctrlgrp,const char * mongrp)649 int write_bm_pid_to_resctrl(pid_t bm_pid, const char *ctrlgrp, const char *mongrp)
650 {
651 char controlgroup[128], monitorgroup[512], monitorgroup_p[256];
652 char tasks[1024];
653 int ret = 0;
654
655 if (ctrlgrp)
656 sprintf(controlgroup, "%s/%s", RESCTRL_PATH, ctrlgrp);
657 else
658 sprintf(controlgroup, "%s", RESCTRL_PATH);
659
660 /* Create control and monitoring group and write pid into it */
661 ret = create_grp(ctrlgrp, controlgroup, RESCTRL_PATH);
662 if (ret)
663 goto out;
664 sprintf(tasks, "%s/tasks", controlgroup);
665 ret = write_pid_to_tasks(tasks, bm_pid);
666 if (ret)
667 goto out;
668
669 /* Create monitor group and write pid into if it is used */
670 if (mongrp) {
671 sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
672 sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
673 ret = create_grp(mongrp, monitorgroup, monitorgroup_p);
674 if (ret)
675 goto out;
676
677 sprintf(tasks, "%s/mon_groups/%s/tasks",
678 controlgroup, mongrp);
679 ret = write_pid_to_tasks(tasks, bm_pid);
680 if (ret)
681 goto out;
682 }
683
684 out:
685 ksft_print_msg("Writing benchmark parameters to resctrl FS\n");
686 if (ret)
687 ksft_print_msg("Failed writing to resctrlfs\n");
688
689 return ret;
690 }
691
/*
 * write_schemata - Update schemata of a con_mon grp
 * @ctrlgrp: Name of the con_mon grp
 * @schemata: Schemata that should be updated to
 * @cpu_no: CPU number that the benchmark PID is binded to
 * @resource: Resctrl resource (Eg: MB, L3, L2, etc.)
 *
 * Update schemata of a con_mon grp *only* if requested resctrl resource is
 * allocation type
 *
 * Return: 0 on success, < 0 on error.
 */
int write_schemata(const char *ctrlgrp, char *schemata, int cpu_no,
		   const char *resource)
{
	/* reason holds a failure description for the final log message */
	char controlgroup[1024], reason[128], schema[1024] = {};
	int domain_id, fd, schema_len, ret = 0;

	if (!schemata) {
		ksft_print_msg("Skipping empty schemata update\n");

		return -1;
	}

	if (get_domain_id(resource, cpu_no, &domain_id) < 0) {
		sprintf(reason, "Failed to get domain ID");
		ret = -1;

		goto out;
	}

	/* NULL ctrlgrp means the root group's schemata file */
	if (ctrlgrp)
		sprintf(controlgroup, "%s/%s/schemata", RESCTRL_PATH, ctrlgrp);
	else
		sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);

	/* Build the schemata line, e.g. "L3:0=7f\n" */
	schema_len = snprintf(schema, sizeof(schema), "%s:%d=%s\n",
			      resource, domain_id, schemata);
	/* Reject both encoding errors (< 0) and truncation (>= size) */
	if (schema_len < 0 || schema_len >= sizeof(schema)) {
		snprintf(reason, sizeof(reason),
			 "snprintf() failed with return value : %d", schema_len);
		ret = -1;
		goto out;
	}

	fd = open(controlgroup, O_WRONLY);
	if (fd < 0) {
		snprintf(reason, sizeof(reason),
			 "open() failed : %s", strerror(errno));
		ret = -1;

		goto err_schema_not_empty;
	}
	if (write(fd, schema, schema_len) < 0) {
		snprintf(reason, sizeof(reason),
			 "write() failed : %s", strerror(errno));
		close(fd);
		ret = -1;

		goto err_schema_not_empty;
	}
	close(fd);

err_schema_not_empty:
	/* Strip the trailing newline so the schema logs on a single line */
	schema[schema_len - 1] = 0;
out:
	/* Always log what was (or would have been) written, plus any reason */
	ksft_print_msg("Write schema \"%s\" to resctrl FS%s%s\n",
		       schema, ret ? " # " : "",
		       ret ? reason : "");

	return ret;
}
764
check_resctrlfs_support(void)765 bool check_resctrlfs_support(void)
766 {
767 FILE *inf = fopen("/proc/filesystems", "r");
768 DIR *dp;
769 char *res;
770 bool ret = false;
771
772 if (!inf)
773 return false;
774
775 res = fgrep(inf, "nodev\tresctrl\n");
776
777 if (res) {
778 ret = true;
779 free(res);
780 }
781
782 fclose(inf);
783
784 ksft_print_msg("%s Check kernel supports resctrl filesystem\n",
785 ret ? "Pass:" : "Fail:");
786
787 if (!ret)
788 return ret;
789
790 dp = opendir(RESCTRL_PATH);
791 ksft_print_msg("%s Check resctrl mountpoint \"%s\" exists\n",
792 dp ? "Pass:" : "Fail:", RESCTRL_PATH);
793 if (dp)
794 closedir(dp);
795
796 ksft_print_msg("resctrl filesystem %s mounted\n",
797 find_resctrl_mount(NULL) ? "not" : "is");
798
799 return ret;
800 }
801
fgrep(FILE * inf,const char * str)802 char *fgrep(FILE *inf, const char *str)
803 {
804 char line[256];
805 int slen = strlen(str);
806
807 while (!feof(inf)) {
808 if (!fgets(line, 256, inf))
809 break;
810 if (strncmp(line, str, slen))
811 continue;
812
813 return strdup(line);
814 }
815
816 return NULL;
817 }
818
819 /*
820 * resctrl_resource_exists - Check if a resource is supported.
821 * @resource: Resctrl resource (e.g., MB, L3, L2, L3_MON, etc.)
822 *
823 * Return: True if the resource is supported, else false. False is
824 * also returned if resctrl FS is not mounted.
825 */
resctrl_resource_exists(const char * resource)826 bool resctrl_resource_exists(const char *resource)
827 {
828 char res_path[PATH_MAX];
829 struct stat statbuf;
830 int ret;
831
832 if (!resource)
833 return false;
834
835 ret = find_resctrl_mount(NULL);
836 if (ret)
837 return false;
838
839 snprintf(res_path, sizeof(res_path), "%s/%s", INFO_PATH, resource);
840
841 if (stat(res_path, &statbuf))
842 return false;
843
844 return true;
845 }
846
847 /*
848 * resctrl_mon_feature_exists - Check if requested monitoring feature is valid.
849 * @resource: Resource that uses the mon_features file. Currently only L3_MON
850 * is valid.
851 * @feature: Required monitor feature (in mon_features file).
852 *
853 * Return: True if the feature is supported, else false.
854 */
resctrl_mon_feature_exists(const char * resource,const char * feature)855 bool resctrl_mon_feature_exists(const char *resource, const char *feature)
856 {
857 char res_path[PATH_MAX];
858 char *res;
859 FILE *inf;
860
861 if (!feature || !resource)
862 return false;
863
864 snprintf(res_path, sizeof(res_path), "%s/%s/mon_features", INFO_PATH, resource);
865 inf = fopen(res_path, "r");
866 if (!inf)
867 return false;
868
869 res = fgrep(inf, feature);
870 free(res);
871 fclose(inf);
872
873 return !!res;
874 }
875
876 /*
877 * resource_info_file_exists - Check if a file is present inside
878 * /sys/fs/resctrl/info/@resource.
879 * @resource: Required resource (Eg: MB, L3, L2, etc.)
880 * @file: Required file.
881 *
882 * Return: True if the /sys/fs/resctrl/info/@resource/@file exists, else false.
883 */
resource_info_file_exists(const char * resource,const char * file)884 bool resource_info_file_exists(const char *resource, const char *file)
885 {
886 char res_path[PATH_MAX];
887 struct stat statbuf;
888
889 if (!file || !resource)
890 return false;
891
892 snprintf(res_path, sizeof(res_path), "%s/%s/%s", INFO_PATH, resource,
893 file);
894
895 if (stat(res_path, &statbuf))
896 return false;
897
898 return true;
899 }
900
test_resource_feature_check(const struct resctrl_test * test)901 bool test_resource_feature_check(const struct resctrl_test *test)
902 {
903 return resctrl_resource_exists(test->resource);
904 }
905
/*
 * filter_dmesg - Print resctrl-related lines from the kernel log
 *
 * Forks a child that runs dmesg with its stdout connected to a pipe and
 * echoes every line mentioning "intel_rdt:" or "resctrl:".
 *
 * Return: 0 on success, < 0 on error.
 */
int filter_dmesg(void)
{
	char line[1024];
	FILE *fp;
	int pipefds[2];
	pid_t pid;
	int ret;

	ret = pipe(pipefds);
	if (ret) {
		ksft_perror("pipe");
		return ret;
	}
	fflush(stdout);
	pid = fork();
	if (pid == -1) {
		/* Previously unchecked: a failed fork fell through below */
		ksft_perror("fork");
		close(pipefds[0]);
		close(pipefds[1]);
		return -1;
	}
	if (pid == 0) {
		close(pipefds[0]);
		dup2(pipefds[1], STDOUT_FILENO);
		execlp("dmesg", "dmesg", NULL);
		ksft_perror("Executing dmesg");
		exit(1);
	}
	close(pipefds[1]);
	fp = fdopen(pipefds[0], "r");
	if (!fp) {
		ksft_perror("fdopen(pipe)");
		kill(pid, SIGTERM);
		/* Close the read end and reap the child to avoid leaks */
		close(pipefds[0]);
		waitpid(pid, NULL, 0);

		return -1;
	}

	while (fgets(line, 1024, fp)) {
		if (strstr(line, "intel_rdt:"))
			ksft_print_msg("dmesg: %s", line);
		if (strstr(line, "resctrl:"))
			ksft_print_msg("dmesg: %s", line);
	}
	fclose(fp);
	waitpid(pid, NULL, 0);

	return 0;
}
948
/*
 * perf_event_open - Thin wrapper around the perf_event_open(2) syscall
 * @hw_event: Event attributes
 * @pid: Target PID (0 = calling process)
 * @cpu: Target CPU (-1 = any CPU)
 * @group_fd: Event group leader fd, or -1
 * @flags: PERF_FLAG_* bits
 *
 * Return: New event fd on success, -1 on error (errno set by the kernel).
 */
int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
		    int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, hw_event, pid, cpu, group_fd,
		       flags);
}
958
/*
 * count_bits - Population count of @n
 * @n: Value whose set bits are counted
 *
 * Return: Number of bits set in @n.
 */
unsigned int count_bits(unsigned long n)
{
	unsigned int total;

	/* Clear the lowest set bit each iteration (Kernighan's method) */
	for (total = 0; n; total++)
		n &= n - 1;

	return total;
}
970
971 /**
972 * snc_kernel_support - Check for existence of mon_sub_L3_00 file that indicates
973 * SNC resctrl support on the kernel side.
974 *
975 * Return: 0 if not supported, 1 if SNC is disabled or SNC discovery is
976 * unreliable or SNC is both enabled and supported.
977 */
snc_kernel_support(void)978 int snc_kernel_support(void)
979 {
980 char node_path[PATH_MAX];
981 struct stat statbuf;
982 int ret;
983
984 ret = snc_nodes_per_l3_cache();
985 /*
986 * If SNC is disabled then its kernel support isn't important. If SNC
987 * got disabled because the discovery process was unreliable the
988 * snc_unreliable variable was set. It can be used to verify the SNC
989 * discovery reliability elsewhere in the selftest.
990 */
991 if (ret == 1)
992 return ret;
993
994 snprintf(node_path, sizeof(node_path), "%s/%s", RESCTRL_PATH,
995 "mon_data/mon_L3_00/mon_sub_L3_00");
996
997 if (!stat(node_path, &statbuf))
998 return 1;
999
1000 return 0;
1001 }
1002