1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <linux/kconfig.h>
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <inttypes.h>
7 #include <stdlib.h>
8 #include <unistd.h>
9 #include <stdio.h>
10 #include <string.h>
11 #include <sys/param.h>
12 #include <sys/utsname.h>
13 #include <perf/cpumap.h>
14 #include <perf/evlist.h>
15 #include <perf/mmap.h>
16
17 #include "debug.h"
18 #include "dso.h"
19 #include "env.h"
20 #include "parse-events.h"
21 #include "evlist.h"
22 #include "evsel.h"
23 #include "thread_map.h"
24 #include "machine.h"
25 #include "map.h"
26 #include "symbol.h"
27 #include "event.h"
28 #include "record.h"
29 #include "util/mmap.h"
30 #include "util/string2.h"
31 #include "util/synthetic-events.h"
32 #include "util/util.h"
33 #include "thread.h"
34
35 #include "tests.h"
36
37 #include <linux/ctype.h>
38
39 #define BUFSZ 1024
40 #define READLEN 128
41
/*
 * Per-run test state: remembers which kcore maps have already been
 * compared, so that each kcore map is only tested once (objdump struggles
 * with kcore - see read_object_code()).
 */
struct state {
	u64 done[1024];		/* start addresses of kcore maps already tested */
	size_t done_cnt;	/* number of valid entries in done[] */
};
46
/*
 * Parse one whitespace-delimited chunk of hex byte pairs from *line into
 * *buf, advancing all three cursors.  Returns the number of bytes stored.
 */
static size_t read_objdump_chunk(const char **line, unsigned char **buf,
				 size_t *buf_len)
{
	unsigned char *start = *buf;
	size_t nread = 0;

	/* Consume pairs of hex digits until the buffer fills or parsing stops. */
	while (*buf_len > 0) {
		char hi, lo;

		hi = *(*line)++;
		if (!isxdigit(hi))
			break;
		lo = *(*line)++;
		if (!isxdigit(lo))
			break;

		/* Store the decoded byte and advance the output cursor. */
		*(*buf)++ = (hex(hi) << 4) | hex(lo);
		(*buf_len)--;
		nread++;

		/* Whitespace marks the end of the current chunk. */
		if (isspace(**line))
			break;
	}

	/*
	 * objdump will display raw insn as LE if code endian
	 * is LE and bytes_per_chunk > 1. In that case reverse
	 * the chunk we just read.
	 *
	 * see disassemble_bytes() at binutils/objdump.c for details
	 * how objdump chooses display endian)
	 */
	if (nread > 1 && !host_is_bigendian()) {
		unsigned char *end = start + nread - 1;

		while (start < end) {
			unsigned char tmp = *start;

			*start++ = *end;
			*end-- = tmp;
		}
	}

	return nread;
}
99
/*
 * Decode one objdump output line ("addr: <hex bytes> <mnemonic>") into buf.
 * Returns the number of raw bytes successfully decoded.
 */
static size_t read_objdump_line(const char *line, unsigned char *buf,
				size_t buf_len)
{
	const char *cursor;
	size_t total = 0, chunk;

	/* The hex dump follows the "address:" prefix; no colon, no bytes. */
	cursor = strchr(line, ':');
	if (cursor == NULL)
		return 0;

	/* Step past the colon, then past any leading whitespace. */
	for (cursor++; *cursor && isspace(*cursor); cursor++)
		;

	/* Accumulate chunks until one yields no bytes. */
	do {
		chunk = read_objdump_chunk(&cursor, &buf, &buf_len);
		total += chunk;
		cursor++;
	} while (chunk > 0);

	/* return number of successfully read bytes */
	return total;
}
128
/*
 * Parse objdump disassembly from @f and reconstruct the raw object bytes
 * into @buf.
 *
 * @f:          stream of objdump output lines ("addr: <hex bytes> ...")
 * @buf:        output buffer covering [start_addr, start_addr + *len)
 * @len:        in: buffer size; out: number of bytes that could NOT be read
 * @start_addr: address corresponding to buf[0]
 *
 * Return: 0 on success (possibly with *len > 0 leftover), -1 on read error.
 */
static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
{
	char *line = NULL;
	/*
	 * line_len must be initialized: POSIX getline() reads *n even on the
	 * first call; passing an indeterminate value is not portable.
	 */
	size_t line_len = 0, off_last = 0;
	ssize_t ret;
	int err = 0;
	u64 addr, last_addr = start_addr;

	while (off_last < *len) {
		size_t off, read_bytes, written_bytes;
		unsigned char tmp[BUFSZ];

		ret = getline(&line, &line_len, f);
		if (feof(f))
			break;
		if (ret < 0) {
			pr_debug("getline failed\n");
			err = -1;
			break;
		}

		/* read objdump data into temporary buffer */
		read_bytes = read_objdump_line(line, tmp, sizeof(tmp));
		if (!read_bytes)
			continue;

		if (sscanf(line, "%"PRIx64, &addr) != 1)
			continue;
		if (addr < last_addr) {
			pr_debug("addr going backwards, read beyond section?\n");
			break;
		}
		last_addr = addr;

		/* copy it from temporary buffer to 'buf' according
		 * to address on current objdump line */
		off = addr - start_addr;
		if (off >= *len)
			break;
		written_bytes = MIN(read_bytes, *len - off);
		memcpy(buf + off, tmp, written_bytes);
		off_last = off + written_bytes;
	}

	/* len returns number of bytes that could not be read */
	*len -= off_last;

	free(line);

	return err;
}
180
181 /*
182 * Only gets GNU objdump version. Returns 0 for llvm-objdump.
183 */
objdump_version(void)184 static int objdump_version(void)
185 {
186 size_t line_len;
187 char cmd[PATH_MAX * 2];
188 char *line = NULL;
189 const char *fmt;
190 FILE *f;
191 int ret;
192
193 int version_tmp, version_num = 0;
194 char *version = 0, *token;
195
196 fmt = "%s --version";
197 ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path);
198 if (ret <= 0 || (size_t)ret >= sizeof(cmd))
199 return -1;
200 /* Ignore objdump errors */
201 strcat(cmd, " 2>/dev/null");
202 f = popen(cmd, "r");
203 if (!f) {
204 pr_debug("popen failed\n");
205 return -1;
206 }
207 /* Get first line of objdump --version output */
208 ret = getline(&line, &line_len, f);
209 pclose(f);
210 if (ret < 0) {
211 pr_debug("getline failed\n");
212 return -1;
213 }
214
215 token = strsep(&line, " ");
216 if (token != NULL && !strcmp(token, "GNU")) {
217 // version is last part of first line of objdump --version output.
218 while ((token = strsep(&line, " ")))
219 version = token;
220
221 // Convert version into a format we can compare with
222 token = strsep(&version, ".");
223 version_num = atoi(token);
224 if (version_num)
225 version_num *= 10000;
226
227 token = strsep(&version, ".");
228 version_tmp = atoi(token);
229 if (token)
230 version_num += version_tmp * 100;
231
232 token = strsep(&version, ".");
233 version_tmp = atoi(token);
234 if (token)
235 version_num += version_tmp;
236 }
237
238 return version_num;
239 }
240
/*
 * Disassemble @len bytes at @addr in @filename with objdump and collect the
 * raw instruction bytes into @buf.
 *
 * Return: 0 when all bytes were read, > 0 (number of missing bytes) when
 * objdump produced too few bytes, -1 on error.
 */
static int read_via_objdump(const char *filename, u64 addr, void *buf,
			    size_t len)
{
	u64 stop_address = addr + len;
	struct utsname uname_buf;
	char cmd[PATH_MAX * 2];
	const char *fmt;
	FILE *f;
	int ret;

	ret = uname(&uname_buf);
	if (ret) {
		pr_debug("uname failed\n");
		return -1;
	}

	if (!strncmp(uname_buf.machine, "riscv", 5)) {
		int version = objdump_version();

		/* Default to this workaround if version parsing fails */
		if (version < 0 || version > 24100) {
			/*
			 * Starting at riscv objdump version 2.41, dumping in
			 * the middle of an instruction is not supported. riscv
			 * instructions are aligned along 2-byte intervals and
			 * can be either 2-bytes or 4-bytes. This makes it
			 * possible that the stop-address lands in the middle of
			 * a 4-byte instruction. Increase the stop_address by
			 * two to ensure an instruction is not cut in half, but
			 * leave the len as-is so only the expected number of
			 * bytes are collected.
			 */
			stop_address += 2;
		}
	}

	fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
	ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path, addr, stop_address,
		       filename);
	/* Reserve room for the " 2>/dev/null" suffix appended below. */
	if (ret <= 0 || (size_t)ret >= sizeof(cmd) - strlen(" 2>/dev/null"))
		return -1;

	pr_debug("Objdump command is: %s\n", cmd);

	/* Ignore objdump errors */
	strcat(cmd, " 2>/dev/null");

	f = popen(cmd, "r");
	if (!f) {
		pr_debug("popen failed\n");
		return -1;
	}

	ret = read_objdump_output(f, buf, &len, addr);
	if (len) {
		/* %zu: len is a size_t (%zd would expect ssize_t) */
		pr_debug("objdump read too few bytes: %zu\n", len);
		if (!ret)
			ret = len;
	}

	pclose(f);

	return ret;
}
305
/* Hex-dump len bytes of buf to the debug log, 16 bytes per row. */
static void dump_buf(unsigned char *buf, size_t len)
{
	size_t idx;

	for (idx = 0; idx < len; idx++) {
		pr_debug("0x%02x ", buf[idx]);
		/* Break the line after every 16th byte. */
		if ((idx & 0xf) == 0xf)
			pr_debug("\n");
	}
	pr_debug("\n");
}
317
/*
 * Resolve @addr in @thread's address space, read up to @len bytes of object
 * code both through perf's dso reading and through objdump, and compare the
 * two.
 *
 * Returns 0 on success or when the comparison is deliberately skipped
 * (hypervisor addresses, kallsyms-only kernel addresses, module stubs past
 * text end, already-tested kcore maps), and -1 on error or mismatch.
 */
static int read_object_code(u64 addr, size_t len, u8 cpumode,
			    struct thread *thread, struct state *state)
{
	struct addr_location al;
	unsigned char buf1[BUFSZ] = {0};	/* bytes read via dso */
	unsigned char buf2[BUFSZ] = {0};	/* bytes read via objdump */
	size_t ret_len;
	u64 objdump_addr;
	const char *objdump_name;
	char decomp_name[KMOD_DECOMP_LEN];
	bool decomp = false;
	int ret, err = 0;
	struct dso *dso;

	pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);

	addr_location__init(&al);
	if (!thread__find_map(thread, cpumode, addr, &al) || !map__dso(al.map)) {
		if (cpumode == PERF_RECORD_MISC_HYPERVISOR) {
			pr_debug("Hypervisor address can not be resolved - skipping\n");
			goto out;
		}

		pr_debug("thread__find_map failed\n");
		err = -1;
		goto out;
	}
	dso = map__dso(al.map);
	pr_debug("File is: %s\n", dso__long_name(dso));

	/* kallsyms has symbols but no object code to compare against. */
	if (dso__symtab_type(dso) == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) {
		pr_debug("Unexpected kernel address - skipping\n");
		goto out;
	}

	pr_debug("On file address is: %#"PRIx64"\n", al.addr);

	/* Clamp to the size of the comparison buffers. */
	if (len > BUFSZ)
		len = BUFSZ;

	/* Do not go off the map */
	if (addr + len > map__end(al.map))
		len = map__end(al.map) - addr;

	/*
	 * Some architectures (ex: powerpc) have stubs (trampolines) in kernel
	 * modules to manage long jumps. Check if the ip offset falls in stubs
	 * sections for kernel modules. And skip module address after text end
	 */
	if (dso__is_kmod(dso) && al.addr > dso__text_end(dso)) {
		pr_debug("skipping the module address %#"PRIx64" after text end\n", al.addr);
		goto out;
	}

	/* Read the object code using perf */
	ret_len = dso__data_read_offset(dso, maps__machine(thread__maps(thread)),
					al.addr, buf1, len);
	if (ret_len != len) {
		pr_debug("dso__data_read_offset failed\n");
		err = -1;
		goto out;
	}

	/*
	 * Converting addresses for use by objdump requires more information.
	 * map__load() does that. See map__rip_2objdump() for details.
	 */
	if (map__load(al.map)) {
		err = -1;
		goto out;
	}

	/* objdump struggles with kcore - try each map only once */
	if (dso__is_kcore(dso)) {
		size_t d;

		for (d = 0; d < state->done_cnt; d++) {
			if (state->done[d] == map__start(al.map)) {
				pr_debug("kcore map tested already");
				pr_debug(" - skipping\n");
				goto out;
			}
		}
		if (state->done_cnt >= ARRAY_SIZE(state->done)) {
			pr_debug("Too many kcore maps - skipping\n");
			goto out;
		}
		state->done[state->done_cnt++] = map__start(al.map);
	}

	/* Compressed kernel modules must be decompressed for objdump. */
	objdump_name = dso__long_name(dso);
	if (dso__needs_decompress(dso)) {
		if (dso__decompress_kmodule_path(dso, objdump_name,
						 decomp_name,
						 sizeof(decomp_name)) < 0) {
			pr_debug("decompression failed\n");
			err = -1;
			goto out;
		}

		decomp = true;
		objdump_name = decomp_name;
	}

	/* Read the object code using objdump */
	objdump_addr = map__rip_2objdump(al.map, al.addr);
	ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);

	/* Remove the temporary decompressed module file. */
	if (decomp)
		unlink(objdump_name);

	if (ret > 0) {
		/*
		 * The kernel maps are inaccurate - assume objdump is right in
		 * that case.
		 */
		if (cpumode == PERF_RECORD_MISC_KERNEL ||
		    cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
			len -= ret;
			if (len) {
				pr_debug("Reducing len to %zu\n", len);
			} else if (dso__is_kcore(dso)) {
				/*
				 * objdump cannot handle very large segments
				 * that may be found in kcore.
				 */
				pr_debug("objdump failed for kcore");
				pr_debug(" - skipping\n");
			} else {
				err = -1;
			}
			goto out;
		}
	}
	if (ret < 0) {
		pr_debug("read_via_objdump failed\n");
		err = -1;
		goto out;
	}

	/* The results should be identical */
	if (memcmp(buf1, buf2, len)) {
		pr_debug("Bytes read differ from those read by objdump\n");
		pr_debug("buf1 (dso):\n");
		dump_buf(buf1, len);
		pr_debug("buf2 (objdump):\n");
		dump_buf(buf2, len);
		err = -1;
		goto out;
	}
	pr_debug("Bytes read match those read by objdump\n");
out:
	addr_location__exit(&al);
	return err;
}
473
process_sample_event(struct machine * machine,struct evlist * evlist,union perf_event * event,struct state * state)474 static int process_sample_event(struct machine *machine,
475 struct evlist *evlist,
476 union perf_event *event, struct state *state)
477 {
478 struct perf_sample sample;
479 struct thread *thread;
480 int ret;
481
482 perf_sample__init(&sample, /*all=*/false);
483 ret = evlist__parse_sample(evlist, event, &sample);
484 if (ret) {
485 pr_debug("evlist__parse_sample failed\n");
486 ret = -1;
487 goto out;
488 }
489
490 thread = machine__findnew_thread(machine, sample.pid, sample.tid);
491 if (!thread) {
492 pr_debug("machine__findnew_thread failed\n");
493 ret = -1;
494 goto out;
495 }
496
497 ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread, state);
498 thread__put(thread);
499 out:
500 perf_sample__exit(&sample);
501 return ret;
502 }
503
process_event(struct machine * machine,struct evlist * evlist,union perf_event * event,struct state * state)504 static int process_event(struct machine *machine, struct evlist *evlist,
505 union perf_event *event, struct state *state)
506 {
507 if (event->header.type == PERF_RECORD_SAMPLE)
508 return process_sample_event(machine, evlist, event, state);
509
510 if (event->header.type == PERF_RECORD_THROTTLE ||
511 event->header.type == PERF_RECORD_UNTHROTTLE)
512 return 0;
513
514 if (event->header.type < PERF_RECORD_MAX) {
515 int ret;
516
517 ret = machine__process_event(machine, event, NULL);
518 if (ret < 0)
519 pr_debug("machine__process_event failed, event type %u\n",
520 event->header.type);
521 return ret;
522 }
523
524 return 0;
525 }
526
process_events(struct machine * machine,struct evlist * evlist,struct state * state)527 static int process_events(struct machine *machine, struct evlist *evlist,
528 struct state *state)
529 {
530 union perf_event *event;
531 struct mmap *md;
532 int i, ret;
533
534 for (i = 0; i < evlist->core.nr_mmaps; i++) {
535 md = &evlist->mmap[i];
536 if (perf_mmap__read_init(&md->core) < 0)
537 continue;
538
539 while ((event = perf_mmap__read_event(&md->core)) != NULL) {
540 ret = process_event(machine, evlist, event, state);
541 perf_mmap__consume(&md->core);
542 if (ret < 0)
543 return ret;
544 }
545 perf_mmap__read_done(&md->core);
546 }
547 return 0;
548 }
549
/*
 * qsort() comparator for ints.  Uses comparisons instead of subtraction:
 * 'a - b' can overflow (undefined behavior) when the operands have opposite
 * signs near INT_MIN/INT_MAX.
 */
static int comp(const void *a, const void *b)
{
	int lhs = *(const int *)a;
	int rhs = *(const int *)b;

	return (lhs > rhs) - (lhs < rhs);
}
554
do_sort_something(void)555 static void do_sort_something(void)
556 {
557 int buf[40960], i;
558
559 for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
560 buf[i] = ARRAY_SIZE(buf) - i - 1;
561
562 qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);
563
564 for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
565 if (buf[i] != i) {
566 pr_debug("qsort failed\n");
567 break;
568 }
569 }
570 }
571
/* Repeat the sort workload to generate plenty of samples. */
static void sort_something(void)
{
	int iter;

	for (iter = 0; iter < 10; iter++)
		do_sort_something();
}
579
/* Syscall-heavy workload: repeatedly create and close a pipe. */
static void syscall_something(void)
{
	int fds[2];
	int iter;

	for (iter = 0; iter < 1000; iter++) {
		if (pipe(fds) < 0) {
			pr_debug("pipe failed\n");
			break;
		}
		/* Close write end first, matching the original order. */
		close(fds[1]);
		close(fds[0]);
	}
}
594
/* Filesystem workload: repeatedly create and remove a scratch file. */
static void fs_something(void)
{
	const char *test_file_name = "temp-perf-code-reading-test-file--";
	int iter;

	for (iter = 0; iter < 1000; iter++) {
		FILE *f = fopen(test_file_name, "w+");

		if (f) {
			fclose(f);
			unlink(test_file_name);
		}
	}
}
609
/* Run all three workloads (fs, sort, syscalls) to produce samples. */
static void do_something(void)
{
	fs_something();

	sort_something();

	syscall_something();
}
618
/* Outcomes of do_test_code_reading(); all but OK are skip reasons. */
enum {
	TEST_CODE_READING_OK,			/* comparison ran and matched */
	TEST_CODE_READING_NO_VMLINUX,		/* ran without a vmlinux */
	TEST_CODE_READING_NO_KCORE,		/* kcore pass requested, no kcore */
	TEST_CODE_READING_NO_ACCESS,		/* only :u (user) events opened */
	TEST_CODE_READING_NO_KERNEL_OBJ,	/* neither vmlinux nor kcore */
};
626
/*
 * Core of the test: record samples of this process running known workloads,
 * then compare the object code perf reads for each sample IP against what
 * objdump reports.
 *
 * @try_kcore: force kallsyms/kcore instead of vmlinux for the kernel map.
 *
 * Return: one of the TEST_CODE_READING_* values, or -1 on hard failure.
 */
static int do_test_code_reading(bool try_kcore)
{
	struct machine *machine;
	struct thread *thread;
	struct record_opts opts = {
		.mmap_pages = UINT_MAX,
		.user_freq = UINT_MAX,
		.user_interval = ULLONG_MAX,
		.freq = 500,
		.target = {
			.uses_mmap = true,
		},
	};
	struct state state = {
		.done_cnt = 0,
	};
	struct perf_thread_map *threads = NULL;
	struct perf_cpu_map *cpus = NULL;
	struct evlist *evlist = NULL;
	struct evsel *evsel = NULL;
	int err = -1, ret;
	pid_t pid;
	struct map *map;
	bool have_vmlinux, have_kcore;
	struct dso *dso;
	const char *events[] = { "cycles", "cycles:u", "cpu-clock", "cpu-clock:u", NULL };
	int evidx = 0;

	pid = getpid();

	machine = machine__new_host();
	if (!machine) {
		/* Don't dereference a failed allocation below. */
		pr_debug("machine__new_host failed\n");
		return -1;
	}
	machine->env = &perf_env;

	ret = machine__create_kernel_maps(machine);
	if (ret < 0) {
		pr_debug("machine__create_kernel_maps failed\n");
		goto out_err;
	}

	/* Force the use of kallsyms instead of vmlinux to try kcore */
	if (try_kcore)
		symbol_conf.kallsyms_name = "/proc/kallsyms";

	/* Load kernel map */
	map = machine__kernel_map(machine);
	ret = map__load(map);
	if (ret < 0) {
		pr_debug("map__load failed\n");
		goto out_err;
	}
	dso = map__dso(map);
	have_vmlinux = dso__is_vmlinux(dso);
	have_kcore = dso__is_kcore(dso);

	/* 2nd time through we just try kcore */
	if (try_kcore && !have_kcore) {
		/* goto (not return) so 'machine' is not leaked */
		err = TEST_CODE_READING_NO_KCORE;
		goto out_err;
	}

	/* No point getting kernel events if there is no kernel object */
	if (!have_vmlinux && !have_kcore)
		evidx++;

	threads = thread_map__new_by_tid(pid);
	if (!threads) {
		pr_debug("thread_map__new_by_tid failed\n");
		goto out_err;
	}

	ret = perf_event__synthesize_thread_map(NULL, threads,
						perf_event__process, machine,
						true, false);
	if (ret < 0) {
		pr_debug("perf_event__synthesize_thread_map failed\n");
		goto out_err;
	}

	thread = machine__findnew_thread(machine, pid, pid);
	if (!thread) {
		pr_debug("machine__findnew_thread failed\n");
		goto out_put;
	}

	cpus = perf_cpu_map__new_online_cpus();
	if (!cpus) {
		pr_debug("perf_cpu_map__new failed\n");
		goto out_put;
	}

	/* Try each event in turn until one can be opened. */
	while (events[evidx]) {
		const char *str;

		evlist = evlist__new();
		if (!evlist) {
			pr_debug("evlist__new failed\n");
			goto out_put;
		}

		perf_evlist__set_maps(&evlist->core, cpus, threads);

		str = events[evidx];
		pr_debug("Parsing event '%s'\n", str);
		ret = parse_event(evlist, str);
		if (ret < 0) {
			pr_debug("parse_events failed\n");
			goto out_put;
		}

		evlist__config(evlist, &opts, NULL);

		evlist__for_each_entry(evlist, evsel) {
			evsel->core.attr.comm = 1;
			evsel->core.attr.disabled = 1;
			evsel->core.attr.enable_on_exec = 0;
		}

		ret = evlist__open(evlist);
		if (ret < 0) {
			evidx++;

			/* Last candidate failed too - report why. */
			if (events[evidx] == NULL && verbose > 0) {
				char errbuf[512];
				evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
				pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
			}

			/*
			 * Both cpus and threads are now owned by evlist
			 * and will be freed by following perf_evlist__set_maps
			 * call. Getting reference to keep them alive.
			 */
			perf_cpu_map__get(cpus);
			perf_thread_map__get(threads);
			perf_evlist__set_maps(&evlist->core, NULL, NULL);
			evlist__delete(evlist);
			evlist = NULL;
			continue;
		}
		break;
	}

	if (events[evidx] == NULL)
		goto out_put;

	ret = evlist__mmap(evlist, UINT_MAX);
	if (ret < 0) {
		pr_debug("evlist__mmap failed\n");
		goto out_put;
	}

	evlist__enable(evlist);

	do_something();

	evlist__disable(evlist);

	ret = process_events(machine, evlist, &state);
	if (ret < 0)
		goto out_put;

	/* Map what we ended up testing onto the reported outcome. */
	if (!have_vmlinux && !have_kcore && !try_kcore)
		err = TEST_CODE_READING_NO_KERNEL_OBJ;
	else if (!have_vmlinux && !try_kcore)
		err = TEST_CODE_READING_NO_VMLINUX;
	else if (strstr(events[evidx], ":u"))
		err = TEST_CODE_READING_NO_ACCESS;
	else
		err = TEST_CODE_READING_OK;
out_put:
	thread__put(thread);
out_err:
	evlist__delete(evlist);
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
	machine__delete(machine);

	return err;
}
804
/*
 * Test entry point: run the comparison once with vmlinux, and if that
 * succeeds, once more forcing kcore.  Skip reasons count as success.
 */
static int test__code_reading(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	int result = do_test_code_reading(false);

	if (!result)
		result = do_test_code_reading(true);

	if (result == TEST_CODE_READING_OK)
		return 0;
	if (result == TEST_CODE_READING_NO_VMLINUX) {
		pr_debug("no vmlinux\n");
		return 0;
	}
	if (result == TEST_CODE_READING_NO_KCORE) {
		pr_debug("no kcore\n");
		return 0;
	}
	if (result == TEST_CODE_READING_NO_ACCESS) {
		pr_debug("no access\n");
		return 0;
	}
	if (result == TEST_CODE_READING_NO_KERNEL_OBJ) {
		pr_debug("no kernel obj\n");
		return 0;
	}
	return -1;
}
832
/* Register the suite; the test entry point is test__code_reading(). */
DEFINE_SUITE("Object code reading", code_reading);
834