/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/nospec-branch.h>

	.text
	.code64
SYM_FUNC_START(sme_encrypt_execute)

	/*
	 * Encrypt a memory range in place.  The actual copy loop
	 * (__enc_copy, below) is first copied into the workarea and executed
	 * from there on a private one-page stack, because the kernel image
	 * itself is being encrypted while the loop runs.
	 *
	 * Entry parameters:
	 *   RDI - virtual address for the encrypted mapping
	 *   RSI - virtual address for the decrypted mapping
	 *   RDX - length to encrypt
	 *   RCX - virtual address of the encryption workarea, including:
	 *     - stack page (PAGE_SIZE)
	 *     - encryption routine page (PAGE_SIZE)
	 *     - intermediate copy buffer (PMD_SIZE)
	 *    R8 - physical address of the pagetables to use for encryption
	 */

	push	%rbp
	movq	%rsp, %rbp		/* RBP now has original stack pointer */

	/* Set up a one page stack in the non-encrypted memory area */
	movq	%rcx, %rax		/* Workarea stack page */
	leaq	PAGE_SIZE(%rax), %rsp	/* Set new stack pointer */
	addq	$PAGE_SIZE, %rax	/* Workarea encryption routine */

	push	%r12			/* Callee-saved; holds length across the copy below */
	movq	%rdi, %r10		/* Encrypted area */
	movq	%rsi, %r11		/* Decrypted area */
	movq	%rdx, %r12		/* Area length */

	/* Copy encryption routine into the workarea */
	movq	%rax, %rdi				/* Workarea encryption routine */
	leaq	__enc_copy(%rip), %rsi			/* Encryption routine */
	movq	$(.L__enc_copy_end - __enc_copy), %rcx	/* Encryption routine length */
	rep	movsb

	/* Setup registers for call (the __enc_copy register contract below) */
	movq	%r10, %rdi		/* Encrypted area */
	movq	%r11, %rsi		/* Decrypted area */
	movq	%r8, %rdx		/* Pagetables used for encryption */
	movq	%r12, %rcx		/* Area length */
	movq	%rax, %r8		/* Workarea encryption routine */
	addq	$PAGE_SIZE, %r8		/* Workarea intermediate copy buffer */

	ANNOTATE_RETPOLINE_SAFE		/* Indirect call target is the copy made just above */
	call	*%rax			/* Call the encryption routine */

	pop	%r12

	movq	%rbp, %rsp		/* Restore original stack pointer */
	pop	%rbp

	/* Offset to __x86_return_thunk would be wrong here */
	ANNOTATE_UNRET_SAFE
	ret
	int3
SYM_FUNC_END(sme_encrypt_execute)

SYM_FUNC_START(__enc_copy)
	ANNOTATE_NOENDBR
/*
 * Routine used to encrypt memory in place.
 * This routine must be run outside of the kernel proper since
 * the kernel will be encrypted during the process. So this
 * routine is defined here and then copied to an area outside
 * of the kernel where it will remain and run decrypted
 * during execution.
 *
 * On entry the registers must be:
 * RDI - virtual address for the encrypted mapping
 * RSI - virtual address for the decrypted mapping
 * RDX - address of the pagetables to use for encryption
 * RCX - length of area
 * R8 - intermediate copy buffer
 *
 * RAX - points to this routine
 *
 * The area will be encrypted by copying from the non-encrypted
 * memory space to an intermediate buffer and then copying from the
 * intermediate buffer back to the encrypted memory space. The physical
 * addresses of the two mappings are the same which results in the area
 * being encrypted "in place".
 */
	/* Enable the new page tables */
	mov	%rdx, %cr3

	/* Flush any global TLBs by toggling CR4.PGE off and back on */
	mov	%cr4, %rdx
	andq	$~X86_CR4_PGE, %rdx
	mov	%rdx, %cr4
	orq	$X86_CR4_PGE, %rdx
	mov	%rdx, %cr4

	push	%r15			/* Callee-saved; holds original PAT high half */
	push	%r12			/* Callee-saved; holds per-iteration chunk size */

	movq	%rcx, %r9		/* Save area length */
	movq	%rdi, %r10		/* Save encrypted area address */
	movq	%rsi, %r11		/* Save decrypted area address */

	/*
	 * Set the PAT register PA5 entry to write-protect.  PA5 occupies
	 * MSR bits 47:40, i.e. bits 15:8 of EDX after rdmsr.
	 */
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
	mov	%rdx, %r15		/* Save original PAT value */
	andl	$0xffff00ff, %edx	/* Clear PA5 */
	orl	$0x00000500, %edx	/* Set PA5 to WP */
	wrmsr

	wbinvd				/* Invalidate any cache entries */

	/* Copy/encrypt up to 2MB at a time */
	movq	$PMD_SIZE, %r12
1:
	cmpq	%r12, %r9		/* Less than a full chunk remaining? */
	jnb	2f
	movq	%r9, %r12		/* Use remaining length for final chunk */

2:
	movq	%r11, %rsi		/* Source - decrypted area */
	movq	%r8, %rdi		/* Dest - intermediate copy buffer */
	movq	%r12, %rcx
	rep	movsb

	movq	%r8, %rsi		/* Source - intermediate copy buffer */
	movq	%r10, %rdi		/* Dest - encrypted area */
	movq	%r12, %rcx
	rep	movsb

	addq	%r12, %r11		/* Advance decrypted area */
	addq	%r12, %r10		/* Advance encrypted area */
	subq	%r12, %r9		/* Kernel length decrement */
	jnz	1b			/* Kernel length not zero? */

	/*
	 * Restore PAT register.  Only the high half (EDX) was modified
	 * above; the fresh rdmsr re-reads the untouched low half into EAX
	 * before the wrmsr writes both halves back.
	 */
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
	mov	%r15, %rdx		/* Restore original PAT value */
	wrmsr

	pop	%r12
	pop	%r15

	/* Offset to __x86_return_thunk would be wrong here */
	ANNOTATE_UNRET_SAFE
	ret
	int3
.L__enc_copy_end:
SYM_FUNC_END(__enc_copy)