/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Even with __builtin_ the compiler may decide to use the out of line
   function. */

#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
#include <linux/kmsan_string.h>
#endif

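/*
 * The __HAVE_ARCH_* defines below tell the generic string code
 * (lib/string.c and <linux/string.h>) that this architecture supplies
 * its own implementations, so the generic C fallbacks are not used.
 */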
#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);
KCFI_REFERENCE(__memset);

/*
 * KMSAN needs to instrument as much code as possible. Use C versions of
 * memsetXX() from lib/string.c under KMSAN.
 */
#if !defined(CONFIG_KMSAN)
#define __HAVE_ARCH_MEMSET16
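/*
 * memset16/32/64() fill @n 16/32/64-bit slots at @s with the value @v
 * using "rep stos{w,l,q}".  The "+D" and "+c" constraints place the
 * destination in %rdi and the count in %rcx (both are consumed by the
 * instruction), while "a" passes the fill pattern in %rax.  The original
 * pointer is saved in s0 so each helper can return it, matching the
 * generic versions in lib/string.c.
 */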
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	const __auto_type s0 = s;
	asm volatile (
		"rep stosw"
		: "+D" (s), "+c" (n)
		: "a" (v)
		: "memory"
	);
	return s0;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	const __auto_type s0 = s;
	asm volatile (
		"rep stosl"
		: "+D" (s), "+c" (n)
		: "a" (v)
		: "memory"
	);
	return s0;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	const __auto_type s0 = s;
	asm volatile (
		"rep stosq"
		: "+D" (s), "+c" (n)
		: "a" (v)
		: "memory"
	);
	return s0;
}
#endif

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);
KCFI_REFERENCE(__memmove);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
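/*
 * memcpy_flushcache() copies data without leaving it dirty in the CPU
 * cache.  For small compile-time-constant sizes (4, 8 and 16 bytes) it
 * emits non-temporal MOVNTI stores inline; all other sizes fall back to
 * the out-of-line __memcpy_flushcache().
 */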
static __always_inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	if (__builtin_constant_p(cnt)) {
		switch (cnt) {
			case 4:
				asm ("movntil %1, %0" : "=m"(*(u32 *)dst) : "r"(*(u32 *)src));
				return;
			case 8:
				asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
				return;
			case 16:
				asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
				asm ("movntiq %1, %0" : "=m"(*(u64 *)(dst + 8)) : "r"(*(u64 *)(src + 8)));
				return;
		}
	}
	__memcpy_flushcache(dst, src, cnt);
}
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */