1/* SPDX-License-Identifier: GPL-2.0 */ 2/* Copyright 2002 Andi Kleen, SuSE Labs */ 3 4#include <linux/export.h> 5#include <linux/linkage.h> 6#include <linux/cfi_types.h> 7#include <asm/cpufeatures.h> 8#include <asm/alternative.h> 9 10.section .noinstr.text, "ax" 11 12/* 13 * ISO C memset - set a memory block to a byte value. This function uses fast 14 * string to get better performance than the original function. The code is 15 * simpler and shorter than the original function as well. 16 * 17 * rdi destination 18 * rsi value (char) 19 * rdx count (bytes) 20 * 21 * rax original destination 22 * 23 * The FSRS alternative should be done inline (avoiding the call and 24 * the disgusting return handling), but that would require some help 25 * from the compiler for better calling conventions. 26 * 27 * The 'rep stosb' itself is small enough to replace the call, but all 28 * the register moves blow up the code. And two of them are "needed" 29 * only for the return value that is the same as the source input, 30 * which the compiler could/should do much better anyway. 31 */ 32SYM_TYPED_FUNC_START(__memset) 33 ALTERNATIVE "jmp memset_orig", "", X86_FEATURE_FSRS 34 35 movq %rdi,%r9 36 movb %sil,%al 37 movq %rdx,%rcx 38 rep stosb 39 movq %r9,%rax 40 RET 41SYM_FUNC_END(__memset) 42EXPORT_SYMBOL(__memset) 43 44SYM_FUNC_ALIAS_MEMFUNC(memset, __memset) 45EXPORT_SYMBOL(memset) 46 47SYM_FUNC_START_LOCAL(memset_orig) 48 movq %rdi,%r10 49 50 /* expand byte value */ 51 movzbl %sil,%ecx 52 movabs $0x0101010101010101,%rax 53 imulq %rcx,%rax 54 55 /* align dst */ 56 movl %edi,%r9d 57 andl $7,%r9d 58 jnz .Lbad_alignment 59.Lafter_bad_alignment: 60 61 movq %rdx,%rcx 62 shrq $6,%rcx 63 jz .Lhandle_tail 64 65 .p2align 4 66.Lloop_64: 67 decq %rcx 68 movq %rax,(%rdi) 69 movq %rax,8(%rdi) 70 movq %rax,16(%rdi) 71 movq %rax,24(%rdi) 72 movq %rax,32(%rdi) 73 movq %rax,40(%rdi) 74 movq %rax,48(%rdi) 75 movq %rax,56(%rdi) 76 leaq 64(%rdi),%rdi 77 jnz .Lloop_64 78 79 /* Handle tail in loops. The loops should be faster than hard 80 to predict jump tables. */ 81 .p2align 4 82.Lhandle_tail: 83 movl %edx,%ecx 84 andl $63&(~7),%ecx 85 jz .Lhandle_7 86 shrl $3,%ecx 87 .p2align 4 88.Lloop_8: 89 decl %ecx 90 movq %rax,(%rdi) 91 leaq 8(%rdi),%rdi 92 jnz .Lloop_8 93 94.Lhandle_7: 95 andl $7,%edx 96 jz .Lende 97 .p2align 4 98.Lloop_1: 99 decl %edx 100 movb %al,(%rdi) 101 leaq 1(%rdi),%rdi 102 jnz .Lloop_1 103 104.Lende: 105 movq %r10,%rax 106 RET 107 108.Lbad_alignment: 109 cmpq $7,%rdx 110 jbe .Lhandle_7 111 movq %rax,(%rdi) /* unaligned store */ 112 movq $8,%r8 113 subq %r9,%r8 114 addq %r8,%rdi 115 subq %r8,%rdx 116 jmp .Lafter_bad_alignment 117.Lfinal: 118SYM_FUNC_END(memset_orig) 119