/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtual TPM
 *
 * Copyright (c) 2015, 2017, 2019 IBM Corporation.
 *
 * Authors:
 *  Stefan Berger <stefanb@linux.vnet.ibm.com>
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

#include "sysemu/tpm_backend.h"
#include "tpm_util.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "trace.h"

#define DEBUG_SPAPR 0

#define VIO_SPAPR_VTPM(obj) \
     OBJECT_CHECK(SpaprTpmState, (obj), TYPE_TPM_SPAPR)

typedef struct TpmCrq {
    uint8_t valid;  /* 0x80: cmd; 0xc0: init crq */
                    /* 0x81-0x83: CRQ message response */
    uint8_t msg;    /* see below */
    uint16_t len;   /* len of TPM request; len of TPM response */
    uint32_t data;  /* rtce_dma_handle when sending TPM request */
    uint64_t reserved;
} TpmCrq;

#define SPAPR_VTPM_VALID_INIT_CRQ_COMMAND     0xC0
#define SPAPR_VTPM_VALID_COMMAND              0x80
#define SPAPR_VTPM_MSG_RESULT                 0x80

/* msg types for valid = SPAPR_VTPM_VALID_INIT_CRQ */
#define SPAPR_VTPM_INIT_CRQ_RESULT            0x1
#define SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT   0x2

/* msg types for valid = SPAPR_VTPM_VALID_CMD */
#define SPAPR_VTPM_GET_VERSION                0x1
#define SPAPR_VTPM_TPM_COMMAND                0x2
#define SPAPR_VTPM_GET_RTCE_BUFFER_SIZE       0x3
#define SPAPR_VTPM_PREPARE_TO_SUSPEND         0x4

/* response error messages */
#define SPAPR_VTPM_VTPM_ERROR                 0xff

/* error codes */
#define SPAPR_VTPM_ERR_COPY_IN_FAILED         0x3
#define SPAPR_VTPM_ERR_COPY_OUT_FAILED        0x4

#define TPM_SPAPR_BUFFER_MAX                  4096

typedef struct {
    SpaprVioDevice vdev;

    TpmCrq crq; /* track single TPM command */

    uint8_t state;
#define SPAPR_VTPM_STATE_NONE         0
#define SPAPR_VTPM_STATE_EXECUTION    1
#define SPAPR_VTPM_STATE_COMPLETION   2

    unsigned char *buffer;

    uint32_t numbytes; /* number of bytes to deliver on resume */

    TPMBackendCmd cmd;

    TPMBackend *be_driver;
    TPMVersion be_tpm_version;

    size_t be_buffer_size;
} SpaprTpmState;

/*
 * Send a request to the TPM.
 */
static void tpm_spapr_tpm_send(SpaprTpmState *s)
{
    if (trace_event_get_state_backends(TRACE_TPM_SPAPR_SHOW_BUFFER)) {
        tpm_util_show_buffer(s->buffer, s->be_buffer_size, "To TPM");
    }

    s->state = SPAPR_VTPM_STATE_EXECUTION;
    s->cmd = (TPMBackendCmd) {
        .locty = 0,
        .in = s->buffer,
        .in_len = MIN(tpm_cmd_get_size(s->buffer), s->be_buffer_size),
        .out = s->buffer,
        .out_len = s->be_buffer_size,
    };

    tpm_backend_deliver_request(s->be_driver, &s->cmd);
}

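/*
 * Fetch a TPM request from guest memory through the VIO DMA window and pass
 * it on to the backend. A DMA read failure is reported, but the buffer is
 * still forwarded so that the vTPM can reject the malformed request itself.
 */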
static int tpm_spapr_process_cmd(SpaprTpmState *s, uint64_t dataptr)
{
    long rc;

    /* a max. of be_buffer_size bytes can be transported */
    rc = spapr_vio_dma_read(&s->vdev, dataptr,
                            s->buffer, s->be_buffer_size);
    if (rc) {
        error_report("tpm_spapr_got_payload: DMA read failure");
    }
    /* let vTPM handle any malformed request */
    tpm_spapr_tpm_send(s);

    return rc;
}

static inline int spapr_tpm_send_crq(struct SpaprVioDevice *dev, TpmCrq *crq)
{
    return spapr_vio_send_crq(dev, (uint8_t *)crq);
}

static int tpm_spapr_do_crq(struct SpaprVioDevice *dev, uint8_t *crq_data)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);
    TpmCrq local_crq;
    TpmCrq *crq = &s->crq; /* requests only */
    int rc;
    uint8_t valid = crq_data[0];
    uint8_t msg = crq_data[1];

    trace_tpm_spapr_do_crq(valid, msg);

    switch (valid) {
    case SPAPR_VTPM_VALID_INIT_CRQ_COMMAND: /* Init command/response */

        /* Respond to initialization request */
        switch (msg) {
        case SPAPR_VTPM_INIT_CRQ_RESULT:
            trace_tpm_spapr_do_crq_crq_result();
            memset(&local_crq, 0, sizeof(local_crq));
            local_crq.valid = SPAPR_VTPM_VALID_INIT_CRQ_COMMAND;
            local_crq.msg = SPAPR_VTPM_INIT_CRQ_RESULT;
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        case SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT:
            trace_tpm_spapr_do_crq_crq_complete_result();
            memset(&local_crq, 0, sizeof(local_crq));
            local_crq.valid = SPAPR_VTPM_VALID_INIT_CRQ_COMMAND;
            local_crq.msg = SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT;
            spapr_tpm_send_crq(dev, &local_crq);
            break;
        }

        break;
    case SPAPR_VTPM_VALID_COMMAND: /* Payloads */
        switch (msg) {
        case SPAPR_VTPM_TPM_COMMAND:
            trace_tpm_spapr_do_crq_tpm_command();
            if (s->state == SPAPR_VTPM_STATE_EXECUTION) {
                return H_BUSY;
            }
            memcpy(crq, crq_data, sizeof(*crq));

            rc = tpm_spapr_process_cmd(s, be32_to_cpu(crq->data));

            if (rc == H_SUCCESS) {
                crq->valid = be16_to_cpu(0);
            } else {
                local_crq.valid = SPAPR_VTPM_MSG_RESULT;
                local_crq.msg = SPAPR_VTPM_VTPM_ERROR;
                local_crq.len = cpu_to_be16(0);
                local_crq.data = cpu_to_be32(SPAPR_VTPM_ERR_COPY_IN_FAILED);
                spapr_tpm_send_crq(dev, &local_crq);
            }
            break;

        case SPAPR_VTPM_GET_RTCE_BUFFER_SIZE:
            trace_tpm_spapr_do_crq_tpm_get_rtce_buffer_size(s->be_buffer_size);
            local_crq.valid = SPAPR_VTPM_VALID_COMMAND;
            local_crq.msg = SPAPR_VTPM_GET_RTCE_BUFFER_SIZE |
                            SPAPR_VTPM_MSG_RESULT;
            local_crq.len = cpu_to_be16(s->be_buffer_size);
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        case SPAPR_VTPM_GET_VERSION:
            local_crq.valid = SPAPR_VTPM_VALID_COMMAND;
            local_crq.msg = SPAPR_VTPM_GET_VERSION | SPAPR_VTPM_MSG_RESULT;
            local_crq.len = cpu_to_be16(0);
            switch (s->be_tpm_version) {
            case TPM_VERSION_1_2:
                local_crq.data = cpu_to_be32(1);
                break;
            case TPM_VERSION_2_0:
                local_crq.data = cpu_to_be32(2);
                break;
            default:
                g_assert_not_reached();
                break;
            }
            trace_tpm_spapr_do_crq_get_version(be32_to_cpu(local_crq.data));
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        case SPAPR_VTPM_PREPARE_TO_SUSPEND:
            trace_tpm_spapr_do_crq_prepare_to_suspend();
            local_crq.valid = SPAPR_VTPM_VALID_COMMAND;
            local_crq.msg = SPAPR_VTPM_PREPARE_TO_SUSPEND |
                            SPAPR_VTPM_MSG_RESULT;
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        default:
            trace_tpm_spapr_do_crq_unknown_msg_type(crq->msg);
        }
        break;
    default:
        trace_tpm_spapr_do_crq_unknown_crq(valid, msg);
    }

    return H_SUCCESS;
}

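/*
 * Called by the TPM backend once it has finished processing a command:
 * DMA the response back into guest memory and signal completion (or an
 * error) via a CRQ. While a migration is being finalized, the response is
 * only recorded and its delivery deferred to .post_load on the target.
 */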
static void tpm_spapr_request_completed(TPMIf *ti, int ret)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(ti);
    TpmCrq *crq = &s->crq;
    uint32_t len;
    int rc;

    s->state = SPAPR_VTPM_STATE_COMPLETION;

    /* a max. of be_buffer_size bytes can be transported */
    len = MIN(tpm_cmd_get_size(s->buffer), s->be_buffer_size);

    if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
        trace_tpm_spapr_caught_response(len);
        /* defer delivery of response until .post_load */
        s->numbytes = len;
        return;
    }

    rc = spapr_vio_dma_write(&s->vdev, be32_to_cpu(crq->data),
                             s->buffer, len);

    if (trace_event_get_state_backends(TRACE_TPM_SPAPR_SHOW_BUFFER)) {
        tpm_util_show_buffer(s->buffer, len, "From TPM");
    }

    crq->valid = SPAPR_VTPM_MSG_RESULT;
    if (rc == H_SUCCESS) {
        crq->msg = SPAPR_VTPM_TPM_COMMAND | SPAPR_VTPM_MSG_RESULT;
        crq->len = cpu_to_be16(len);
    } else {
        error_report("%s: DMA write failure", __func__);
        crq->msg = SPAPR_VTPM_VTPM_ERROR;
        crq->len = cpu_to_be16(0);
        crq->data = cpu_to_be32(SPAPR_VTPM_ERR_COPY_OUT_FAILED);
    }

    rc = spapr_tpm_send_crq(&s->vdev, crq);
    if (rc) {
        error_report("%s: Error sending response", __func__);
    }
}

static int tpm_spapr_do_startup_tpm(SpaprTpmState *s, size_t buffersize)
{
    return tpm_backend_startup_tpm(s->be_driver, buffersize);
}

static const char *tpm_spapr_get_dt_compatible(SpaprVioDevice *dev)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);

    switch (s->be_tpm_version) {
    case TPM_VERSION_1_2:
        return "IBM,vtpm";
    case TPM_VERSION_2_0:
        return "IBM,vtpm20";
    default:
        g_assert_not_reached();
    }
}

static void tpm_spapr_reset(SpaprVioDevice *dev)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);

    s->state = SPAPR_VTPM_STATE_NONE;
    s->numbytes = 0;

    s->be_tpm_version = tpm_backend_get_tpm_version(s->be_driver);

    s->be_buffer_size = MIN(tpm_backend_get_buffer_size(s->be_driver),
                            TPM_SPAPR_BUFFER_MAX);

    tpm_backend_reset(s->be_driver);
    tpm_spapr_do_startup_tpm(s, s->be_buffer_size);
}

static enum TPMVersion tpm_spapr_get_version(TPMIf *ti)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(ti);

    if (tpm_backend_had_startup_error(s->be_driver)) {
        return TPM_VERSION_UNSPEC;
    }

    return tpm_backend_get_tpm_version(s->be_driver);
}

/* persistent state handling */

static int tpm_spapr_pre_save(void *opaque)
{
    SpaprTpmState *s = opaque;

    tpm_backend_finish_sync(s->be_driver);
    /*
     * we cannot deliver the results to the VM since DMA would touch VM memory
     */

    return 0;
}

static int tpm_spapr_post_load(void *opaque, int version_id)
{
    SpaprTpmState *s = opaque;

    if (s->numbytes) {
        trace_tpm_spapr_post_load();
        /* deliver the results to the VM via DMA */
        tpm_spapr_request_completed(TPM_IF(s), 0);
        s->numbytes = 0;
    }

    return 0;
}

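/*
 * Migrated device state: the VIO device, the request-tracking state byte,
 * any response caught in tpm_spapr_request_completed() while migration was
 * finishing, and the CRQ's DMA address needed to deliver it in .post_load.
 */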
static const VMStateDescription vmstate_spapr_vtpm = {
    .name = "tpm-spapr",
    .pre_save = tpm_spapr_pre_save,
    .post_load = tpm_spapr_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_SPAPR_VIO(vdev, SpaprTpmState),

        VMSTATE_UINT8(state, SpaprTpmState),
        VMSTATE_UINT32(numbytes, SpaprTpmState),
        VMSTATE_VBUFFER_UINT32(buffer, SpaprTpmState, 0, NULL, numbytes),
        /* remember DMA address */
        VMSTATE_UINT32(crq.data, SpaprTpmState),
        VMSTATE_END_OF_LIST(),
    }
};

static Property tpm_spapr_properties[] = {
    DEFINE_SPAPR_PROPERTIES(SpaprTpmState, vdev),
    DEFINE_PROP_TPMBE("tpmdev", SpaprTpmState, be_driver),
    DEFINE_PROP_END_OF_LIST(),
};

static void tpm_spapr_realizefn(SpaprVioDevice *dev, Error **errp)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);

    if (!tpm_find()) {
        error_setg(errp, "at most one TPM device is permitted");
        return;
    }

    dev->crq.SendFunc = tpm_spapr_do_crq;

    if (!s->be_driver) {
        error_setg(errp, "'tpmdev' property is required");
        return;
    }
    s->buffer = g_malloc(TPM_SPAPR_BUFFER_MAX);
}

static void tpm_spapr_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);
    TPMIfClass *tc = TPM_IF_CLASS(klass);

    k->realize = tpm_spapr_realizefn;
    k->reset = tpm_spapr_reset;
    k->dt_name = "vtpm";
    k->dt_type = "IBM,vtpm";
    k->get_dt_compatible = tpm_spapr_get_dt_compatible;
    k->signal_mask = 0x00000001;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    device_class_set_props(dc, tpm_spapr_properties);
    k->rtce_window_size = 0x10000000;
    dc->vmsd = &vmstate_spapr_vtpm;

    tc->model = TPM_MODEL_TPM_SPAPR;
    tc->get_version = tpm_spapr_get_version;
    tc->request_completed = tpm_spapr_request_completed;
}

static const TypeInfo tpm_spapr_info = {
    .name          = TYPE_TPM_SPAPR,
    .parent        = TYPE_VIO_SPAPR_DEVICE,
    .instance_size = sizeof(SpaprTpmState),
    .class_init    = tpm_spapr_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_TPM_IF },
        { }
    }
};

static void tpm_spapr_register_types(void)
{
    type_register_static(&tpm_spapr_info);
}

type_init(tpm_spapr_register_types)
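
/*
 * Example invocation (paths and IDs are illustrative; an external swtpm
 * instance is assumed to be listening on the given control socket):
 *
 *   qemu-system-ppc64 -machine pseries \
 *     -chardev socket,id=chrtpm,path=/tmp/mytpm/swtpm-sock \
 *     -tpmdev emulator,id=tpm0,chardev=chrtpm \
 *     -device tpm-spapr,tpmdev=tpm0 ...
 */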