// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/acpi.h>
#include <linux/cache.h>
#include <linux/clk/clk-conf.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include <linux/spi/offload/types.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_controller_idr);

/*
 * Release callback for the struct device embedded in a spi_device.
 * Undoes what spi_alloc_device() did: drops the controller reference
 * taken there and frees the per-CPU statistics and the device itself.
 */
static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

/* sysfs "modalias" attribute: prefer the ACPI modalias when one exists */
static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

/* sysfs "driver_override": force binding to a specifically named driver */
static ssize_t
driver_override_store(struct device *dev,
		      struct device_attribute *a,
		      const char *buf, size_t count)
{
	int ret;

	ret = __device_set_driver_override(dev, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	/* Lock so the name cannot be freed while we format it */
	guard(spinlock)(&dev->driver_override.lock);
	return sysfs_emit(buf, "%s\n", dev->driver_override.name ?: "");
}
static DEVICE_ATTR_RW(driver_override);

/*
 * Allocate the per-CPU transfer statistics and initialize each CPU's
 * u64_stats sequence counter. Returns NULL on allocation failure.
 */
static struct spi_statistics __percpu *spi_alloc_pcpu_stats(void)
{
	struct spi_statistics __percpu *pcpu_stats;
	int cpu;

	pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);
	if (!pcpu_stats)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct spi_statistics *stat;

		stat = per_cpu_ptr(pcpu_stats, cpu);
		u64_stats_init(&stat->syncp);
	}

	return pcpu_stats;
}

/*
 * Sum one statistics field (identified by its byte @offset inside
 * struct spi_statistics) across all possible CPUs and emit the total.
 * Each per-CPU value is read under the u64_stats retry protocol so a
 * concurrent writer cannot be observed mid-update.
 */
static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
				   char *buf, size_t offset)
{
	u64 val = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct spi_statistics *pcpu_stats;
		u64_stats_t *field;
		unsigned int start;
		u64 inc;

		pcpu_stats = per_cpu_ptr(stat, i);
		field = (void *)pcpu_stats + offset;
		do {
			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
			inc = u64_stats_read(field);
		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
		val += inc;
	}
	return sysfs_emit(buf, "%llu\n", val);
}

/*
 * Generate a pair of read-only sysfs attributes (one for the controller,
 * one for the device) that both display the same statistics field via
 * spi_statistics_<field>_show().
 */
#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					struct spi_controller, dev);	\
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

/*
 * Define the shared show helper that sums @field across CPUs, then
 * instantiate both sysfs attributes for it. @name is the C identifier,
 * @file the sysfs file name, @field the struct spi_statistics member.
 */
#define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf)			\
{									\
	return spi_emit_pcpu_stats(stat, buf,				\
				   offsetof(struct spi_statistics, field)); \
}									\
SPI_STATISTICS_ATTRS(name, file)

/* Shorthand for the common case: sysfs name == member name */
#define SPI_STATISTICS_SHOW(field)					\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field)

SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

/* One attribute per power-of-two transfer-length bucket */
#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

/* Plain per-device attributes (outside the "statistics" group) */
static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

/* Per-device statistics attributes, exposed under .../statistics/ */
static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

/* Per-controller statistics attributes, exposed under .../statistics/ */
static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_controller_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

/*
 * Account one transfer of @msg into the per-CPU statistics: bump the
 * transfer count, the log2-sized length histogram bucket (bucket 0 also
 * absorbs zero-length transfers), and the byte counters.  The update is
 * done on the local CPU with preemption disabled (get_cpu()) under the
 * u64_stats write protocol so readers retry instead of blocking.
 */
static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_message *msg)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if (spi_valid_txbuf(msg, xfer))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if (spi_valid_rxbuf(msg, xfer))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
/* Linear scan of a zero-terminated spi_device_id table for @name */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

/*
 * Look up the spi_device_id entry matching @sdev's modalias in the bound
 * driver's id_table.
 *
 * NOTE(review): assumes the bound driver provides an id_table — a NULL
 * table would be dereferenced in spi_match_id(); confirm all callers only
 * use this with table-providing drivers.
 */
const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

/*
 * Return the firmware-node match data if present, else fall back to the
 * driver_data from the spi_device_id table.
 *
 * NOTE(review): the fallback dereferences spi_get_device_id()'s result
 * without a NULL check — only safe when a table entry is guaranteed.
 */
const void *spi_get_device_match_data(const struct spi_device *sdev)
{
	const void *match;

	match = device_get_match_data(&sdev->dev);
	if (match)
		return match;

	return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);

/*
 * Bus match callback. Match order: sysfs driver_override (if set, it is
 * authoritative), then OF compatible, then ACPI, then the driver's
 * id_table, and finally a bare modalias/driver-name comparison.
 */
static int spi_match_device(struct device *dev, const struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);
	int ret;

	/* Check override first, and if set, only use the named driver */
	ret = device_match_driver_override(dev, drv);
	if (ret >= 0)
		return ret;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

/* Bus uevent callback: emit MODALIAS (ACPI form preferred when available) */
static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

/*
 * Bus probe callback: apply firmware-assigned clock defaults, resolve the
 * device IRQ from OF or ACPI (deferring if the provider is not ready yet),
 * attach the PM domain, and finally call the driver's probe.  If the
 * driver has no probe, ret is 0 here from the successful attach above.
 */
static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (is_of_node(fwnode))
		spi->irq = of_irq_get(dev->of_node, 0);
	else if (is_acpi_device_node(fwnode) && spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0);
	if (spi->irq == -EPROBE_DEFER)
		return dev_err_probe(dev, spi->irq, "Failed to get irq\n");
	if (spi->irq < 0)
		spi->irq = 0;	/* 0 means "no interrupt" for spi_device */

	ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
					PD_FLAG_DETACH_POWER_OFF);
	if (ret)
		return ret;

	if (sdrv->probe)
		ret = sdrv->probe(spi);

	return ret;
}

/* Bus remove callback: forward to the driver's remove, if any */
static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));
}

/* Bus shutdown callback: only meaningful when a driver is still bound */
static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

const struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 * Warn (but still register) for each compatible that has no
	 * matching spi_device_id / driver-name entry.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

/* Registered board_info entries, kept forever on board_list */
struct boardinfo {
	struct list_head list;
	struct spi_board_info board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_controller list, and their matching process also used
 * to protect object of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately. This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller. If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * The returned device holds a reference on @ctlr; it is released by
 * spidev_release() when the last spi_dev_put() drops the device.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc_obj(*spi);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats();
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;
	spi->num_chipselect = 1;	/* single CS unless the caller overrides */

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

/*
 * Derive the bus ID string: ACPI devices use their ACPI name, software
 * nodes their fwnode name, and everything else "<controller>.<cs>".
 */
static void spi_dev_set_name(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct fwnode_handle *fwnode = dev_fwnode(dev);

	if (is_acpi_device_node(fwnode)) {
		dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
		return;
	}

	if (is_software_node(fwnode)) {
		dev_set_name(dev, "spi-%pfwP", fwnode);
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi_get_chipselect(spi, 0));
}

/*
 *
Zero(0) is a valid physical CS value and can be located at any 609 * logical CS in the spi->chip_select[]. If all the physical CS 610 * are initialized to 0 then It would be difficult to differentiate 611 * between a valid physical CS 0 & an unused logical CS whose physical 612 * CS can be 0. As a solution to this issue initialize all the CS to -1. 613 * Now all the unused logical CS will have -1 physical CS value & can be 614 * ignored while performing physical CS validity checks. 615 */ 616 #define SPI_INVALID_CS ((s8)-1) 617 618 static inline int spi_dev_check_cs(struct device *dev, 619 struct spi_device *spi, u8 idx, 620 struct spi_device *new_spi, u8 new_idx) 621 { 622 u8 cs, cs_new; 623 u8 idx_new; 624 625 cs = spi_get_chipselect(spi, idx); 626 for (idx_new = new_idx; idx_new < new_spi->num_chipselect; idx_new++) { 627 cs_new = spi_get_chipselect(new_spi, idx_new); 628 if (cs == cs_new) { 629 dev_err(dev, "chipselect %u already in use\n", cs_new); 630 return -EBUSY; 631 } 632 } 633 return 0; 634 } 635 636 struct spi_dev_check_info { 637 struct spi_device *new_spi; 638 struct spi_device *parent; /* set for ancillary devices */ 639 }; 640 641 static int spi_dev_check(struct device *dev, void *data) 642 { 643 struct spi_device *spi = to_spi_device(dev); 644 struct spi_dev_check_info *info = data; 645 struct spi_device *new_spi = info->new_spi; 646 int status, idx; 647 648 /* 649 * When registering an ancillary device, skip checking against the 650 * parent device since the ancillary is intentionally using one of 651 * the parent's chip selects. 
652 */ 653 if (info->parent && spi == info->parent) 654 return 0; 655 656 if (spi->controller == new_spi->controller) { 657 for (idx = 0; idx < spi->num_chipselect; idx++) { 658 status = spi_dev_check_cs(dev, spi, idx, new_spi, 0); 659 if (status) 660 return status; 661 } 662 } 663 return 0; 664 } 665 666 static void spi_cleanup(struct spi_device *spi) 667 { 668 if (spi->controller->cleanup) 669 spi->controller->cleanup(spi); 670 } 671 672 static int __spi_add_device(struct spi_device *spi, struct spi_device *parent) 673 { 674 struct spi_controller *ctlr = spi->controller; 675 struct device *dev = ctlr->dev.parent; 676 struct spi_dev_check_info check_info; 677 int status, idx; 678 u8 cs; 679 680 if (spi->num_chipselect > SPI_DEVICE_CS_CNT_MAX) { 681 dev_err(dev, "num_cs %d > max %d\n", spi->num_chipselect, 682 SPI_DEVICE_CS_CNT_MAX); 683 return -EOVERFLOW; 684 } 685 686 for (idx = 0; idx < spi->num_chipselect; idx++) { 687 /* Chipselects are numbered 0..max; validate. */ 688 cs = spi_get_chipselect(spi, idx); 689 if (cs >= ctlr->num_chipselect) { 690 dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx), 691 ctlr->num_chipselect); 692 return -EINVAL; 693 } 694 } 695 696 /* 697 * Make sure that multiple logical CS doesn't map to the same physical CS. 698 * For example, spi->chip_select[0] != spi->chip_select[1] and so on. 699 */ 700 if (!spi_controller_is_target(ctlr)) { 701 for (idx = 0; idx < spi->num_chipselect; idx++) { 702 status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1); 703 if (status) 704 return status; 705 } 706 } 707 708 /* Initialize unused logical CS as invalid */ 709 for (idx = spi->num_chipselect; idx < SPI_DEVICE_CS_CNT_MAX; idx++) 710 spi_set_chipselect(spi, idx, SPI_INVALID_CS); 711 712 /* Set the bus ID string */ 713 spi_dev_set_name(spi); 714 715 /* 716 * We need to make sure there's no other device with this 717 * chipselect **BEFORE** we call setup(), else we'll trash 718 * its configuration. 
719 */ 720 check_info.new_spi = spi; 721 check_info.parent = parent; 722 status = bus_for_each_dev(&spi_bus_type, NULL, &check_info, spi_dev_check); 723 if (status) 724 return status; 725 726 /* Controller may unregister concurrently */ 727 if (IS_ENABLED(CONFIG_SPI_DYNAMIC) && 728 !device_is_registered(&ctlr->dev)) { 729 return -ENODEV; 730 } 731 732 if (ctlr->cs_gpiods) { 733 u8 cs; 734 735 for (idx = 0; idx < spi->num_chipselect; idx++) { 736 cs = spi_get_chipselect(spi, idx); 737 spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]); 738 } 739 } 740 741 /* 742 * Drivers may modify this initial i/o setup, but will 743 * normally rely on the device being setup. Devices 744 * using SPI_CS_HIGH can't coexist well otherwise... 745 */ 746 status = spi_setup(spi); 747 if (status < 0) { 748 dev_err(dev, "can't setup %s, status %d\n", 749 dev_name(&spi->dev), status); 750 return status; 751 } 752 753 /* Device may be bound to an active driver when this returns */ 754 status = device_add(&spi->dev); 755 if (status < 0) { 756 dev_err(dev, "can't add %s, status %d\n", 757 dev_name(&spi->dev), status); 758 spi_cleanup(spi); 759 } else { 760 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); 761 } 762 763 return status; 764 } 765 766 /** 767 * spi_add_device - Add spi_device allocated with spi_alloc_device 768 * @spi: spi_device to register 769 * 770 * Companion function to spi_alloc_device. Devices allocated with 771 * spi_alloc_device can be added onto the SPI bus with this function. 
772 * 773 * Return: 0 on success; negative errno on failure 774 */ 775 int spi_add_device(struct spi_device *spi) 776 { 777 struct spi_controller *ctlr = spi->controller; 778 int status; 779 780 /* Set the bus ID string */ 781 spi_dev_set_name(spi); 782 783 mutex_lock(&ctlr->add_lock); 784 status = __spi_add_device(spi, NULL); 785 mutex_unlock(&ctlr->add_lock); 786 return status; 787 } 788 EXPORT_SYMBOL_GPL(spi_add_device); 789 790 /** 791 * spi_new_device - instantiate one new SPI device 792 * @ctlr: Controller to which device is connected 793 * @chip: Describes the SPI device 794 * Context: can sleep 795 * 796 * On typical mainboards, this is purely internal; and it's not needed 797 * after board init creates the hard-wired devices. Some development 798 * platforms may not be able to use spi_register_board_info though, and 799 * this is exported so that for example a USB or parport based adapter 800 * driver could add devices (which it would learn about out-of-band). 801 * 802 * Return: the new device, or NULL. 803 */ 804 struct spi_device *spi_new_device(struct spi_controller *ctlr, 805 struct spi_board_info *chip) 806 { 807 struct spi_device *proxy; 808 int status; 809 810 /* 811 * NOTE: caller did any chip->bus_num checks necessary. 812 * 813 * Also, unless we change the return value convention to use 814 * error-or-pointer (not NULL-or-pointer), troubleshootability 815 * suggests syslogged diagnostics are best here (ugh). 
816 */ 817 818 proxy = spi_alloc_device(ctlr); 819 if (!proxy) 820 return NULL; 821 822 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 823 824 /* Use provided chip-select for proxy device */ 825 spi_set_chipselect(proxy, 0, chip->chip_select); 826 827 proxy->max_speed_hz = chip->max_speed_hz; 828 proxy->mode = chip->mode; 829 proxy->irq = chip->irq; 830 strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 831 proxy->dev.platform_data = (void *) chip->platform_data; 832 proxy->controller_data = chip->controller_data; 833 proxy->controller_state = NULL; 834 /* 835 * By default spi->chip_select[0] will hold the physical CS number, 836 * so set bit 0 in spi->cs_index_mask. 837 */ 838 proxy->cs_index_mask = BIT(0); 839 840 if (chip->swnode) { 841 status = device_add_software_node(&proxy->dev, chip->swnode); 842 if (status) { 843 dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n", 844 chip->modalias, status); 845 goto err_dev_put; 846 } 847 } 848 849 status = spi_add_device(proxy); 850 if (status < 0) 851 goto err_dev_put; 852 853 return proxy; 854 855 err_dev_put: 856 device_remove_software_node(&proxy->dev); 857 spi_dev_put(proxy); 858 return NULL; 859 } 860 EXPORT_SYMBOL_GPL(spi_new_device); 861 862 /** 863 * spi_unregister_device - unregister a single SPI device 864 * @spi: spi_device to unregister 865 * 866 * Start making the passed SPI device vanish. Normally this would be handled 867 * by spi_unregister_controller(). 
 */
void spi_unregister_device(struct spi_device *spi)
{
	struct fwnode_handle *fwnode;

	if (!spi)
		return;

	/* Release the firmware-node bookkeeping taken at creation time */
	fwnode = dev_fwnode(&spi->dev);
	if (is_of_node(fwnode)) {
		of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
		of_node_put(to_of_node(fwnode));
	} else if (is_acpi_device_node(fwnode)) {
		acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
	}
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);	/* may free spi via spidev_release() */
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

/*
 * Instantiate @bi on @ctlr if the board info is for this bus number.
 * Failure is logged but not propagated — board info registration is
 * best-effort.
 */
static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	/* One zeroed boardinfo per descriptor; kept forever (see above) */
	bi = kzalloc_objs(*bi, n);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		/*
		 * Under board_lock: queue the entry and instantiate it on
		 * any matching controller that is already registered.
		 */
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi: the SPI device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size: size to alloc and return
 * @gfp: GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	/* Header and payload are one allocation; caller gets the payload */
	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	/* Return the trailing data area, not the spi_res header itself */
	return sres->data;
}

/**
 * spi_res_free - free an SPI resource
 * @res: pointer to the custom data of a resource
 *
 * Must only be called on a resource that is not queued on a message
 * (the WARN_ON checks that).
 */
static void spi_res_free(void *res)
{
	/* Recover the spi_res header from the payload pointer */
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the SPI message
 * @res: the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	/* A resource may only be queued on one message at a time */
	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all SPI resources for this message
 * @ctlr: the @spi_controller
 * @message: the @spi_message
 *
 * Walks the resource list in reverse (LIFO) order so resources are torn
 * down opposite to their acquisition order.
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}

/*-------------------------------------------------------------------------*/

/* Iterate over chipselect indices enabled in the device's cs_index_mask */
#define spi_for_each_valid_cs(spi, idx)				\
	for (idx = 0; idx < spi->num_chipselect; idx++)		\
		if (!(spi->cs_index_mask & BIT(idx))) {} else

/*
 * Return true if any valid chipselect of @spi matches what the controller
 * recorded as last driven in last_cs[].
 */
static inline bool spi_is_last_cs(struct spi_device *spi)
{
	u8 idx;
	bool last = false;

	spi_for_each_valid_cs(spi, idx) {
		if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
			last = true;
	}
	return last;
}

/* Drive one GPIO chipselect line and execute the setup/inactive delays */
static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
{
	/*
	 * Historically ACPI has no means of the GPIO polarity and
	 * thus the SPISerialBus() resource defines it on the per-chip
	 * basis. In order to avoid a chain of negations, the GPIO
	 * polarity is considered being Active High. Even for the cases
	 * when _DSD() is involved (in the updated versions of ACPI)
	 * the GPIO CS polarity must be defined Active High to avoid
	 * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
	 * into account.
	 */
	if (is_acpi_device_node(dev_fwnode(&spi->dev)))
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
	else
		/* Polarity handled by GPIO library */
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);

	if (activate)
		spi_delay_exec(&spi->cs_setup, NULL);
	else
		spi_delay_exec(&spi->cs_inactive, NULL);
}

/*
 * Assert or deassert the chip select(s) for @spi, honouring SPI_CS_HIGH,
 * SPI_NO_CS, GPIO-based CS lines and the controller's ->set_cs() hook.
 * @force skips the "nothing changed" shortcut below.
 */
static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	/* Remember the logical request before SPI_CS_HIGH inverts @enable */
	bool activate = enable;
	u8 idx;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && (enable == spi_is_last_cs(spi)) &&
	    (spi->controller->last_cs_index_mask == spi->cs_index_mask) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	/* Record what is now driven so the shortcut above works next time */
	spi->controller->last_cs_index_mask = spi->cs_index_mask;
	for (idx = 0; idx < SPI_DEVICE_CS_CNT_MAX; idx++) {
		/*
		 * NOTE(review): every active slot records chipselect 0, while
		 * spi_is_last_cs() compares slot @idx against
		 * spi_get_chipselect(spi, idx) — confirm this asymmetry is
		 * intended for multi-CS devices.
		 */
		if (enable && idx < spi->num_chipselect)
			spi->controller->last_cs[idx] = spi_get_chipselect(spi, 0);
		else
			spi->controller->last_cs[idx] = SPI_INVALID_CS;
	}

	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
	if (spi->controller->last_cs_mode_high)
		enable = !enable;

	/*
	 * Handle chip select delays for GPIO based CS or controllers without
	 * programmable chip select timing.
	 */
	if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
		spi_delay_exec(&spi->cs_hold, NULL);

	if (spi_is_csgpiod(spi)) {
		if (!(spi->mode & SPI_NO_CS)) {
			spi_for_each_valid_cs(spi, idx) {
				if (spi_get_csgpiod(spi, idx))
					spi_toggle_csgpiod(spi, idx, enable, activate);
			}
		}
		/* Some SPI controllers need both GPIO CS & ->set_cs() */
		if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
/*
 * Build a scatter/gather table covering @buf/@len and DMA-map it for @dir
 * on @dev. Supports lowmem, vmalloc'ed and (with CONFIG_HIGHMEM) kmap'ed
 * buffers; any other address is rejected with -EINVAL.
 */
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
			     struct sg_table *sgt, void *buf, size_t len,
			     enum dma_data_direction dir, unsigned long attrs)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	/* Detect addresses inside the persistent kmap window */
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
			       (unsigned long)buf < (PKMAP_BASE +
						     (LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		/* vmalloc/kmap buffers must be mapped one page at a time */
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sgtable(dev, sgt, dir, attrs);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

/* Public wrapper around spi_map_buf_attrs() with no extra DMA attributes */
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

/* Undo spi_map_buf_attrs(): unmap, free the table and reset the counters */
static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
				struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	dma_unmap_sgtable(dev, sgt, dir, attrs);
	sg_free_table(sgt);
	/* Mark the table empty so a later unmap is a no-op */
	sgt->orig_nents = 0;
	sgt->nents = 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}

/*
 * DMA-map every transfer of @msg that the controller's ->can_dma() accepts.
 * On success the chosen mapping devices are cached in cur_{rx,tx}_dma_dev.
 */
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	/* Prefer the DMA channel's device, then an explicit map dev, then the parent */
	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	/* ret stays -ENOMSG iff no transfer got mapped below */
	ret = -ENOMSG;
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync is done before each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;

			xfer->tx_sg_mapped = true;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				/* Roll back the TX mapping of this transfer */
				spi_unmap_buf_attrs(ctlr, tx_dev,
						    &xfer->tx_sg, DMA_TO_DEVICE,
						    attrs);

				return ret;
			}

			xfer->rx_sg_mapped = true;
		}
	}
	/* No transfer has been mapped, bail out with success */
	if (ret)
		return 0;

	ctlr->cur_rx_dma_dev = rx_dev;
	ctlr->cur_tx_dma_dev = tx_dev;

	return 0;
}

/* Unmap every transfer previously mapped by __spi_map_msg() */
static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (xfer->rx_sg_mapped)
			spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
					    DMA_FROM_DEVICE, attrs);
		xfer->rx_sg_mapped = false;

		if (xfer->tx_sg_mapped)
			spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
					    DMA_TO_DEVICE, attrs);
		xfer->tx_sg_mapped = false;
	}

	return 0;
}

/* Hand mapped buffers to the device before starting a DMA transfer */
static void spi_dma_sync_for_device(struct spi_controller *ctlr,
				    struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (xfer->tx_sg_mapped)
		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	if (xfer->rx_sg_mapped)
		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

/* Hand mapped buffers back to the CPU after a DMA transfer completes */
static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
				 struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (xfer->rx_sg_mapped)
		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	if (xfer->tx_sg_mapped)
		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
				    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
				 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */

/*
 * Strip the dummy buffers substituted by spi_map_msg() and then undo any
 * DMA mappings for @msg.
 */
static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

/*
 * Substitute controller-owned dummy buffers for NULL tx/rx buffers when the
 * controller flags say it must always drive both directions, then DMA-map
 * the message.
 */
static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		/* Find the largest transfer missing a buffer per direction */
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			/* Dummy TX must be zeroed so nothing stale is sent */
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

/*
 * Wait for completion of a single transfer. In target mode the wait is
 * interruptible and unbounded; in host mode a timeout is derived from the
 * transfer length and clock speed.
 */
static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_target(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		/* Fall back to a conservative 100 kHz when no speed is set */
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use respective multiplier, but before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Increase it twice and add 200 ms tolerance, use
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}

		/* Transfer completed but the driver flagged an I/O error */
		if (xfer->error & SPI_TRANS_FAIL_IO)
			return -EIO;
	}

	return 0;
}

/*
 * Delay for @ns nanoseconds; busy-waits for sub-microsecond delays and
 * sleeps (fsleep) for anything longer.
 */
static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		fsleep(us);
	}
}

/*
 * spi_delay_to_ns - convert an spi_delay to nanoseconds
 * @_delay: the delay specification (value + unit)
 * @xfer: transfer used to resolve SPI_DELAY_UNIT_SCK; may be NULL otherwise
 *
 * Return: the delay in nanoseconds (>= 0), or -EINVAL for an unknown unit
 * or a clock-cycle delay with no usable clock rate.
 */
int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If there is unknown effective speed, approximate it
		 * by underestimating with half of the requested Hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

/*
 * spi_delay_exec - resolve an spi_delay and sleep/spin for that long
 * @_delay: delay specification; must not be NULL
 * @xfer: transfer context for clock-cycle delays (may be NULL)
 *
 * Return: 0 on success or the negative error from spi_delay_to_ns().
 * May sleep, hence the might_sleep() annotation.
 */
int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);

/*
 * Execute the delay between deasserting and reasserting CS on a cs_change.
 * Falls back to a 10 us default when no delay is configured (USECS unit)
 * or when the configured unit is unsupported.
 */
static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

/* Exported wrapper so drivers can reuse the CS-change delay logic */
void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
				       struct spi_transfer *xfer)
{
	_spi_transfer_cs_change_delay(msg, xfer);
}
EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	/* Assert CS up front unless the first transfer wants it off */
	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_set_cs(msg->spi, !xfer->cs_off, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, msg);
		spi_statistics_add_transfer_stats(stats, xfer, msg);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			spi_dma_sync_for_device(ctlr, xfer);
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				spi_dma_sync_for_cpu(ctlr, xfer);

				/*
				 * If the driver refused to start the DMA'd
				 * transfer (SPI_TRANS_FAIL_NO_START), unmap
				 * and retry the same transfer in PIO mode.
				 */
				if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			/* ret > 0 means the transfer completes asynchronously */
			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}

			spi_dma_sync_for_cpu(ctlr, xfer);
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				/* cs_change on the last transfer: leave CS asserted */
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_set_cs(msg->spi, true, false);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			/* Next transfer toggles cs_off: adjust CS accordingly */
			spi_set_cs(msg->spi, xfer->cs_off, false);
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);

/* Drop the runtime-PM reference (autosuspend) when the controller goes idle */
static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

/*
 * Power up and prepare the hardware (only when transitioning from idle),
 * prepare and map @msg, hand it to ->transfer_one_message() and wait until
 * spi_finalize_current_message() has finished with it.
 *
 * On any failure the message is finalized here with its error status, and
 * the error is returned.
 */
static int __spi_pump_transfer_message(struct spi_controller *ctlr,
				       struct spi_message *msg, bool was_busy)
{
	struct spi_transfer *xfer;
	int ret;

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			/* Balance the failed get without triggering idle */
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	trace_spi_message_start(msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			return ret;
		}
		msg->prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	/*
	 * Drivers implementation of transfer_one_message() must arrange for
	 * spi_finalize_current_message() to get called. Most drivers will do
	 * this in the calling context, but some don't. For those cases, a
	 * completion is used to guarantee that this function does not return
	 * until spi_finalize_current_message() is done accessing
	 * ctlr->cur_msg.
	 * Use of the following two flags enable to opportunistically skip the
	 * use of the completion since its use involves expensive spin locks.
	 * In case of a race with the context that calls
	 * spi_finalize_current_message() the completion will always be used,
	 * due to strict ordering of these flags using barriers.
	 */
	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
	reinit_completion(&ctlr->cur_msg_completion);
	smp_wmb(); /* Make these available to spi_finalize_current_message() */

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		return ret;
	}

	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
	smp_mb(); /* See spi_finalize_current_message()... */
	if (READ_ONCE(ctlr->cur_msg_incomplete))
		wait_for_completion(&ctlr->cur_msg_completion);

	return 0;
}

/**
 * __spi_pump_messages - function which processes SPI message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any SPI message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Take the I/O mutex */
	mutex_lock(&ctlr->io_mutex);

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg)
		goto out_unlock;

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy)
			goto out_unlock;

		/* Defer any non-atomic teardown to the thread */
		if (!in_kthread) {
			/*
			 * Nothing to free/unprepare: we can go idle right
			 * here; otherwise bounce the teardown to the kworker.
			 */
			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
			    !ctlr->unprepare_transfer_hardware) {
				spi_idle_runtime_pm(ctlr);
				ctlr->busy = false;
				ctlr->queue_empty = true;
				trace_spi_controller_idle(ctlr);
			} else {
				kthread_queue_work(ctlr->kworker,
						   &ctlr->pump_messages);
			}
			goto out_unlock;
		}

		ctlr->busy = false;
		/* Drop the spinlock: teardown below may sleep */
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->queue_empty = true;
		goto out_unlock;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	/* Always requeue the pump to pick up the next message (or go idle) */
	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
	return;

out_unlock:
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	mutex_unlock(&ctlr->io_mutex);
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}

/**
 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, must follow up with
 *	      spi_take_timestamp_post or otherwise system will crash.
 *	      WARNING: for fully predictable results, the CPU frequency must
 *	      also be under control (governor).
 *
 * This is a helper for drivers to collect the beginning of the TX timestamp
 * for the requested byte from the SPI transfer.
 * The frequency with which this
 * function must be called (once per word, once for the whole transfer, once
 * per batch of words etc) is arbitrary as long as the @tx buffer offset is
 * greater than or equal to the requested byte at the time of the call. The
 * timestamp is only taken once, at the first such call. It is assumed that
 * the driver advances its @tx buffer pointer monotonically.
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    size_t progress, bool irqs_off)
{
	/* No snapshot area requested for this transfer */
	if (!xfer->ptp_sts)
		return;

	/* Timestamp already fully taken by an earlier pre/post pair */
	if (xfer->timestamped)
		return;

	/* Not yet at (or past) the word of interest */
	if (progress > xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_pre = progress;

	if (irqs_off) {
		/* Paired with local_irq_restore()/preempt_enable() in _post */
		local_irq_save(ctlr->irq_flags);
		preempt_disable();
	}

	ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);

/**
 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 *
 * This is a helper for drivers to collect the end of the TX timestamp for
 * the requested byte from the SPI transfer. Can be called with an arbitrary
 * frequency: only the first call where @tx exceeds or is equal to the
 * requested word will be timestamped.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	/* The word of interest has not been shifted out yet */
	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);

	if (irqs_off) {
		/* Balances spi_take_timestamp_pre() with irqs_off=true */
		local_irq_restore(ctlr->irq_flags);
		preempt_enable();
	}

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_post = progress;

	/* Mark done so subsequent pre/post calls become no-ops */
	xfer->timestamped = 1;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);

/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller.  If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	dev_info(&ctlr->dev,
		"will run message pump with realtime priority\n");
	/* SCHED_FIFO for the message pump kworker task */
	sched_set_fifo(ctlr->kworker->task);
}

/*
 * Create the message-pump kworker and initialise the queue state flags.
 * Return: 0 on success or the PTR_ERR() of the failed kworker creation.
 */
static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;
	ctlr->queue_empty = true;

	ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker)) {
		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
		return PTR_ERR(ctlr->kworker);
	}

	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 *				   messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* Get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/*
 * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
 *			      and spi_maybe_unoptimize_message()
 * @msg: the message to unoptimize
 *
 * Peripheral drivers should use spi_unoptimize_message() and callers inside
 * core should use spi_maybe_unoptimize_message() rather than calling this
 * function directly.
 *
 * It is not valid to call this on a message that is not currently optimized.
 */
static void __spi_unoptimize_message(struct spi_message *msg)
{
	struct spi_controller *ctlr = msg->spi->controller;

	/* Give the controller driver first chance to undo its own state */
	if (ctlr->unoptimize_message)
		ctlr->unoptimize_message(msg);

	spi_res_release(ctlr, msg);

	msg->optimized = false;
	msg->opt_state = NULL;
}

/*
 * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
 * @msg: the message to unoptimize
 *
 * This function is used to unoptimize a message if and only if it was
 * optimized by the core (via spi_maybe_optimize_message()).
 */
static void spi_maybe_unoptimize_message(struct spi_message *msg)
{
	/* Only undo optimization that the core itself applied */
	if (!msg->pre_optimized && msg->optimized &&
	    !msg->spi->controller->defer_optimize_message)
		__spi_unoptimize_message(msg);
}

/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_transfer *xfer;
	struct spi_message *mesg;
	int ret;

	mesg = ctlr->cur_msg;

	/*
	 * Take the "post" PTP system timestamps here for drivers that do not
	 * do per-transfer timestamping themselves (no ptp_sts support and no
	 * transfer_one callback).
	 */
	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}
	}

	/* Drivers claiming ptp_sts support must have timestamped every xfer */
	if (unlikely(ctlr->ptp_sts_supported))
		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);

	spi_unmap_msg(ctlr, mesg);

	if (mesg->prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	mesg->prepared = false;

	spi_maybe_unoptimize_message(mesg);

	/*
	 * Publish completion before checking whether anyone is waiting; the
	 * barrier pairs with the one in __spi_pump_transfer_message().
	 */
	WRITE_ONCE(ctlr->cur_msg_incomplete, false);
	smp_mb(); /* See __spi_pump_transfer_message()... */
	if (READ_ONCE(ctlr->cur_msg_need_completion))
		complete(&ctlr->cur_msg_completion);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

/*
 * spi_start_queue - mark the queue as running and kick the message pump
 * @ctlr: the controller whose queue should start
 *
 * Return: 0 on success, -EBUSY if the queue is already running or busy.
 */
static int spi_start_queue(struct spi_controller *ctlr)
{
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (ctlr->running || ctlr->busy) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -EBUSY;
	}

	ctlr->running = true;
	ctlr->cur_msg = NULL;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	return 0;
}

/*
 * spi_stop_queue - stop the queue once it has drained
 * @ctlr: the controller whose queue should stop
 *
 * Polls (up to ~5 seconds) for the queue to become empty and idle before
 * clearing the running flag.
 *
 * Return: 0 on success, -EBUSY if the queue never drained.
 */
static int spi_stop_queue(struct spi_controller *ctlr)
{
	unsigned int limit = 500;
	unsigned long flags;

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the ctlr->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	do {
		spin_lock_irqsave(&ctlr->queue_lock, flags);
		if (list_empty(&ctlr->queue) && !ctlr->busy) {
			ctlr->running = false;
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return 0;
		}
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		usleep_range(10000, 11000);
	} while (--limit);

	return -EBUSY;
}

/*
 * spi_destroy_queue - stop the queue and destroy the message pump kworker
 * @ctlr: the controller being torn down
 *
 * Return: 0 on success, or the error from spi_stop_queue() if the queue
 * could not be drained (in which case the kworker is deliberately leaked,
 * see comment below).
 */
static int spi_destroy_queue(struct spi_controller *ctlr)
{
	int ret;

	ret = spi_stop_queue(ctlr);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&ctlr->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_destroy_worker(ctlr->kworker);

	return 0;
}

/*
 * __spi_queued_transfer - enqueue a message on the controller's queue
 * @spi: device sending the message
 * @msg: message to queue
 * @need_pump: when true, kick the message pump if the controller is idle;
 *	when false the caller arranges for the queue to be processed
 *
 * Return: 0 on success, -ESHUTDOWN if the queue is not running.
 */
static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	ctlr->queue_empty = false;
	if (!ctlr->busy && need_pump)
		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: SPI device which is requesting transfer
 * @msg: SPI message which is to handled is queued to driver queue
 *
 * Return: zero on success, else a negative error code.
2293 */ 2294 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 2295 { 2296 return __spi_queued_transfer(spi, msg, true); 2297 } 2298 2299 static int spi_controller_initialize_queue(struct spi_controller *ctlr) 2300 { 2301 int ret; 2302 2303 ctlr->transfer = spi_queued_transfer; 2304 if (!ctlr->transfer_one_message) 2305 ctlr->transfer_one_message = spi_transfer_one_message; 2306 2307 /* Initialize and start queue */ 2308 ret = spi_init_queue(ctlr); 2309 if (ret) { 2310 dev_err(&ctlr->dev, "problem initializing queue\n"); 2311 goto err_init_queue; 2312 } 2313 ctlr->queued = true; 2314 ret = spi_start_queue(ctlr); 2315 if (ret) { 2316 dev_err(&ctlr->dev, "problem starting queue\n"); 2317 goto err_start_queue; 2318 } 2319 2320 return 0; 2321 2322 err_start_queue: 2323 spi_destroy_queue(ctlr); 2324 err_init_queue: 2325 return ret; 2326 } 2327 2328 /** 2329 * spi_flush_queue - Send all pending messages in the queue from the callers' 2330 * context 2331 * @ctlr: controller to process queue for 2332 * 2333 * This should be used when one wants to ensure all pending messages have been 2334 * sent before doing something. Is used by the spi-mem code to make sure SPI 2335 * memory operations do not preempt regular SPI transfers that have been queued 2336 * before the spi-mem operation. 
2337 */ 2338 void spi_flush_queue(struct spi_controller *ctlr) 2339 { 2340 if (ctlr->transfer == spi_queued_transfer) 2341 __spi_pump_messages(ctlr, false); 2342 } 2343 2344 /*-------------------------------------------------------------------------*/ 2345 2346 #if defined(CONFIG_OF) 2347 static void of_spi_parse_dt_cs_delay(struct device_node *nc, 2348 struct spi_delay *delay, const char *prop) 2349 { 2350 u32 value; 2351 2352 if (!of_property_read_u32(nc, prop, &value)) { 2353 if (value > U16_MAX) { 2354 delay->value = DIV_ROUND_UP(value, 1000); 2355 delay->unit = SPI_DELAY_UNIT_USECS; 2356 } else { 2357 delay->value = value; 2358 delay->unit = SPI_DELAY_UNIT_NSECS; 2359 } 2360 } 2361 } 2362 2363 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, 2364 struct device_node *nc) 2365 { 2366 u32 value, cs[SPI_DEVICE_CS_CNT_MAX], map[SPI_DEVICE_DATA_LANE_CNT_MAX]; 2367 int rc, idx, max_num_data_lanes; 2368 2369 /* Mode (clock phase/polarity/etc.) */ 2370 if (of_property_read_bool(nc, "spi-cpha")) 2371 spi->mode |= SPI_CPHA; 2372 if (of_property_read_bool(nc, "spi-cpol")) 2373 spi->mode |= SPI_CPOL; 2374 if (of_property_read_bool(nc, "spi-3wire")) 2375 spi->mode |= SPI_3WIRE; 2376 if (of_property_read_bool(nc, "spi-lsb-first")) 2377 spi->mode |= SPI_LSB_FIRST; 2378 if (of_property_read_bool(nc, "spi-cs-high")) 2379 spi->mode |= SPI_CS_HIGH; 2380 2381 /* Device DUAL/QUAD mode */ 2382 2383 rc = of_property_read_variable_u32_array(nc, "spi-tx-lane-map", map, 1, 2384 ARRAY_SIZE(map)); 2385 if (rc >= 0) { 2386 max_num_data_lanes = rc; 2387 for (idx = 0; idx < max_num_data_lanes; idx++) 2388 spi->tx_lane_map[idx] = map[idx]; 2389 } else if (rc == -EINVAL) { 2390 /* Default lane map is identity mapping. 
*/ 2391 max_num_data_lanes = ARRAY_SIZE(spi->tx_lane_map); 2392 for (idx = 0; idx < max_num_data_lanes; idx++) 2393 spi->tx_lane_map[idx] = idx; 2394 } else { 2395 dev_err(&ctlr->dev, 2396 "failed to read spi-tx-lane-map property: %d\n", rc); 2397 return rc; 2398 } 2399 2400 rc = of_property_count_u32_elems(nc, "spi-tx-bus-width"); 2401 if (rc < 0 && rc != -EINVAL) { 2402 dev_err(&ctlr->dev, 2403 "failed to read spi-tx-bus-width property: %d\n", rc); 2404 return rc; 2405 } 2406 if (rc > max_num_data_lanes) { 2407 dev_err(&ctlr->dev, 2408 "spi-tx-bus-width has more elements (%d) than spi-tx-lane-map (%d)\n", 2409 rc, max_num_data_lanes); 2410 return -EINVAL; 2411 } 2412 2413 if (rc == -EINVAL) { 2414 /* Default when property is not present. */ 2415 spi->num_tx_lanes = 1; 2416 } else { 2417 u32 first_value; 2418 2419 spi->num_tx_lanes = rc; 2420 2421 for (idx = 0; idx < spi->num_tx_lanes; idx++) { 2422 rc = of_property_read_u32_index(nc, "spi-tx-bus-width", 2423 idx, &value); 2424 if (rc) 2425 return rc; 2426 2427 /* 2428 * For now, we only support all lanes having the same 2429 * width so we can keep using the existing mode flags. 
2430 */ 2431 if (!idx) 2432 first_value = value; 2433 else if (first_value != value) { 2434 dev_err(&ctlr->dev, 2435 "spi-tx-bus-width has inconsistent values: first %d vs later %d\n", 2436 first_value, value); 2437 return -EINVAL; 2438 } 2439 } 2440 2441 switch (value) { 2442 case 0: 2443 spi->mode |= SPI_NO_TX; 2444 break; 2445 case 1: 2446 break; 2447 case 2: 2448 spi->mode |= SPI_TX_DUAL; 2449 break; 2450 case 4: 2451 spi->mode |= SPI_TX_QUAD; 2452 break; 2453 case 8: 2454 spi->mode |= SPI_TX_OCTAL; 2455 break; 2456 default: 2457 dev_warn(&ctlr->dev, 2458 "spi-tx-bus-width %d not supported\n", 2459 value); 2460 break; 2461 } 2462 } 2463 2464 for (idx = 0; idx < spi->num_tx_lanes; idx++) { 2465 if (spi->tx_lane_map[idx] >= spi->controller->num_data_lanes) { 2466 dev_err(&ctlr->dev, 2467 "spi-tx-lane-map has invalid value %d (num_data_lanes=%d)\n", 2468 spi->tx_lane_map[idx], 2469 spi->controller->num_data_lanes); 2470 return -EINVAL; 2471 } 2472 } 2473 2474 rc = of_property_read_variable_u32_array(nc, "spi-rx-lane-map", map, 1, 2475 ARRAY_SIZE(map)); 2476 if (rc >= 0) { 2477 max_num_data_lanes = rc; 2478 for (idx = 0; idx < max_num_data_lanes; idx++) 2479 spi->rx_lane_map[idx] = map[idx]; 2480 } else if (rc == -EINVAL) { 2481 /* Default lane map is identity mapping. 
*/ 2482 max_num_data_lanes = ARRAY_SIZE(spi->rx_lane_map); 2483 for (idx = 0; idx < max_num_data_lanes; idx++) 2484 spi->rx_lane_map[idx] = idx; 2485 } else { 2486 dev_err(&ctlr->dev, 2487 "failed to read spi-rx-lane-map property: %d\n", rc); 2488 return rc; 2489 } 2490 2491 rc = of_property_count_u32_elems(nc, "spi-rx-bus-width"); 2492 if (rc < 0 && rc != -EINVAL) { 2493 dev_err(&ctlr->dev, 2494 "failed to read spi-rx-bus-width property: %d\n", rc); 2495 return rc; 2496 } 2497 if (rc > max_num_data_lanes) { 2498 dev_err(&ctlr->dev, 2499 "spi-rx-bus-width has more elements (%d) than spi-rx-lane-map (%d)\n", 2500 rc, max_num_data_lanes); 2501 return -EINVAL; 2502 } 2503 2504 if (rc == -EINVAL) { 2505 /* Default when property is not present. */ 2506 spi->num_rx_lanes = 1; 2507 } else { 2508 u32 first_value; 2509 2510 spi->num_rx_lanes = rc; 2511 2512 for (idx = 0; idx < spi->num_rx_lanes; idx++) { 2513 rc = of_property_read_u32_index(nc, "spi-rx-bus-width", 2514 idx, &value); 2515 if (rc) 2516 return rc; 2517 2518 /* 2519 * For now, we only support all lanes having the same 2520 * width so we can keep using the existing mode flags. 
2521 */ 2522 if (!idx) 2523 first_value = value; 2524 else if (first_value != value) { 2525 dev_err(&ctlr->dev, 2526 "spi-rx-bus-width has inconsistent values: first %d vs later %d\n", 2527 first_value, value); 2528 return -EINVAL; 2529 } 2530 } 2531 2532 switch (value) { 2533 case 0: 2534 spi->mode |= SPI_NO_RX; 2535 break; 2536 case 1: 2537 break; 2538 case 2: 2539 spi->mode |= SPI_RX_DUAL; 2540 break; 2541 case 4: 2542 spi->mode |= SPI_RX_QUAD; 2543 break; 2544 case 8: 2545 spi->mode |= SPI_RX_OCTAL; 2546 break; 2547 default: 2548 dev_warn(&ctlr->dev, 2549 "spi-rx-bus-width %d not supported\n", 2550 value); 2551 break; 2552 } 2553 } 2554 2555 for (idx = 0; idx < spi->num_rx_lanes; idx++) { 2556 if (spi->rx_lane_map[idx] >= spi->controller->num_data_lanes) { 2557 dev_err(&ctlr->dev, 2558 "spi-rx-lane-map has invalid value %d (num_data_lanes=%d)\n", 2559 spi->rx_lane_map[idx], 2560 spi->controller->num_data_lanes); 2561 return -EINVAL; 2562 } 2563 } 2564 2565 if (spi_controller_is_target(ctlr)) { 2566 if (!of_node_name_eq(nc, "slave")) { 2567 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", 2568 nc); 2569 return -EINVAL; 2570 } 2571 return 0; 2572 } 2573 2574 /* Device address */ 2575 rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1, 2576 SPI_DEVICE_CS_CNT_MAX); 2577 if (rc < 0) { 2578 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", 2579 nc, rc); 2580 return rc; 2581 } 2582 2583 if ((of_property_present(nc, "parallel-memories")) && 2584 (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) { 2585 dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n"); 2586 return -EINVAL; 2587 } 2588 2589 spi->num_chipselect = rc; 2590 for (idx = 0; idx < rc; idx++) 2591 spi_set_chipselect(spi, idx, cs[idx]); 2592 2593 /* 2594 * By default spi->chip_select[0] will hold the physical CS number, 2595 * so set bit 0 in spi->cs_index_mask. 
2596 */ 2597 spi->cs_index_mask = BIT(0); 2598 2599 /* Device speed */ 2600 if (!of_property_read_u32(nc, "spi-max-frequency", &value)) 2601 spi->max_speed_hz = value; 2602 2603 /* Device CS delays */ 2604 of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns"); 2605 of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns"); 2606 of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns"); 2607 2608 return 0; 2609 } 2610 2611 static struct spi_device * 2612 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) 2613 { 2614 struct spi_device *spi; 2615 int rc; 2616 2617 /* Alloc an spi_device */ 2618 spi = spi_alloc_device(ctlr); 2619 if (!spi) { 2620 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); 2621 rc = -ENOMEM; 2622 goto err_out; 2623 } 2624 2625 /* Select device driver */ 2626 rc = of_alias_from_compatible(nc, spi->modalias, 2627 sizeof(spi->modalias)); 2628 if (rc < 0) { 2629 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); 2630 goto err_out; 2631 } 2632 2633 rc = of_spi_parse_dt(ctlr, spi, nc); 2634 if (rc) 2635 goto err_out; 2636 2637 /* Store a pointer to the node in the device structure */ 2638 of_node_get(nc); 2639 2640 device_set_node(&spi->dev, of_fwnode_handle(nc)); 2641 2642 /* Register the new device */ 2643 rc = spi_add_device(spi); 2644 if (rc) { 2645 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); 2646 goto err_of_node_put; 2647 } 2648 2649 return spi; 2650 2651 err_of_node_put: 2652 of_node_put(nc); 2653 err_out: 2654 spi_dev_put(spi); 2655 return ERR_PTR(rc); 2656 } 2657 2658 /** 2659 * of_register_spi_devices() - Register child devices onto the SPI bus 2660 * @ctlr: Pointer to spi_controller device 2661 * 2662 * Registers an spi_device for each child node of controller node which 2663 * represents a valid SPI target device. 
 */
static void of_register_spi_devices(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	struct device_node *nc;

	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
		/* Skip nodes another enumerator already populated */
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(ctlr, nc);
		if (IS_ERR(spi)) {
			/* Clear the flag so a later rescan can retry */
			dev_warn(&ctlr->dev,
				 "Failed to create SPI device for %pOF\n", nc);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif

/**
 * spi_new_ancillary_device() - Register ancillary SPI device
 * @spi: Pointer to the main SPI device registering the ancillary device
 * @chip_select: Chip Select of the ancillary device
 *
 * Register an ancillary SPI device; for example some chips have a chip-select
 * for normal device usage and another one for setup/firmware upload.
 *
 * This may only be called from main SPI device's probe routine.
 *
 * Return: Pointer to the new ancillary device on success; ERR_PTR on failure
 */
struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
					    u8 chip_select)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_device *ancillary;
	int rc;

	/* Alloc an spi_device */
	ancillary = spi_alloc_device(ctlr);
	if (!ancillary) {
		rc = -ENOMEM;
		goto err_out;
	}

	strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));

	/* Use provided chip-select for ancillary device */
	spi_set_chipselect(ancillary, 0, chip_select);

	/* Take over SPI mode/speed from SPI main device */
	ancillary->max_speed_hz = spi->max_speed_hz;
	ancillary->mode = spi->mode;
	/*
	 * By default spi->chip_select[0] will hold the physical CS number,
	 * so set bit 0 in spi->cs_index_mask.
	 */
	ancillary->cs_index_mask = BIT(0);

	/* Caller must be in probe context, i.e. holding ctlr->add_lock */
	WARN_ON(!mutex_is_locked(&ctlr->add_lock));

	/* Register the new device, passing the parent to skip CS conflict check */
	rc = __spi_add_device(ancillary, spi);
	if (rc) {
		dev_err(&spi->dev, "failed to register ancillary device\n");
		goto err_out;
	}

	return ancillary;

err_out:
	spi_dev_put(ancillary);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(spi_new_ancillary_device);

/* devm action wrapper: unregister the given spi_device */
static void devm_spi_unregister_device(void *spi)
{
	spi_unregister_device(spi);
}

/**
 * devm_spi_new_ancillary_device() - Register managed ancillary SPI device
 * @spi: Pointer to the main SPI device registering the ancillary device
 * @chip_select: Chip Select of the ancillary device
 *
 * Register an ancillary SPI device; for example some chips have a chip-select
 * for normal device usage and another one for setup/firmware upload.
 *
 * This is the managed version of spi_new_ancillary_device(). The ancillary
 * device will be unregistered automatically when the parent SPI device is
 * unregistered.
 *
 * This may only be called from main SPI device's probe routine.
 *
 * Return: Pointer to new ancillary device on success; ERR_PTR on failure
 */
struct spi_device *devm_spi_new_ancillary_device(struct spi_device *spi,
						 u8 chip_select)
{
	struct spi_device *ancillary;
	int ret;

	ancillary = spi_new_ancillary_device(spi, chip_select);
	if (IS_ERR(ancillary))
		return ancillary;

	/* Tie the ancillary device's lifetime to the main SPI device */
	ret = devm_add_action_or_reset(&spi->dev, devm_spi_unregister_device,
				       ancillary);
	if (ret)
		return ERR_PTR(ret);

	return ancillary;
}
EXPORT_SYMBOL_GPL(devm_spi_new_ancillary_device);

#ifdef CONFIG_ACPI
/* State accumulated while walking an ACPI device's SPI resources */
struct acpi_spi_lookup {
	struct spi_controller *ctlr;	/* controller matched (or to match) */
	u32 max_speed_hz;		/* connection speed from _CRS */
	u32 mode;			/* SPI_* mode flags */
	int irq;			/* first interrupt resource, or -1 */
	u8 bits_per_word;
	u8 chip_select;
	int n;				/* running SpiSerialBus index */
	int index;			/* requested index, or -1 for any */
};

/* Resource-walk callback: count SpiSerialBus resources */
static int acpi_spi_count(struct acpi_resource *ares, void *data)
{
	struct acpi_resource_spi_serialbus *sb;
	int *count = data;

	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
		return 1;

	sb = &ares->data.spi_serial_bus;
	if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
		return 1;

	*count = *count + 1;

	return 1;
}

/**
 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
 * @adev: ACPI device
 *
 * Return: the number of SpiSerialBus resources in the ACPI-device's
 * resource-list; or a negative error code.
 */
int acpi_spi_count_resources(struct acpi_device *adev)
{
	LIST_HEAD(r);
	int count = 0;
	int ret;

	ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
	if (ret < 0)
		return ret;

	acpi_dev_free_resource_list(&r);

	return count;
}
EXPORT_SYMBOL_GPL(acpi_spi_count_resources);

/*
 * acpi_spi_parse_apple_properties - read Apple's _DSD-style SPI properties
 * @dev: ACPI device to query
 * @lookup: lookup structure to fill in
 *
 * Apple x86 machines describe SPI devices via buffer-typed device properties
 * instead of _CRS; no-op on non-Apple hardware.
 */
static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
					    struct acpi_spi_lookup *lookup)
{
	const union acpi_object *obj;

	if (!x86_apple_machine)
		return;

	/* Clock period in ns, converted to a maximum speed in Hz */
	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length >= 4)
		lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8)
		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;

	/* A zero spiBitOrder means LSB first */
	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_LSB_FIRST;

	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPOL;

	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPHA;
}

/*
 * acpi_spi_add_resource - resource-walk callback that fills in an
 * acpi_spi_lookup from a SpiSerialBus (or interrupt) resource
 * @ares: resource being visited
 * @data: the struct acpi_spi_lookup being filled in
 */
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct acpi_spi_lookup *lookup = data;
	struct spi_controller *ctlr = lookup->ctlr;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;
		acpi_handle parent_handle;
		acpi_status status;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {

			/* Skip entries until the requested index, if any */
			if (lookup->index != -1 && lookup->n++ != lookup->index)
				return 1;

			status = acpi_get_handle(NULL,
						 sb->resource_source.string_ptr,
						 &parent_handle);

			if (ACPI_FAILURE(status))
				return -ENODEV;

			if (ctlr) {
				/* Resource must belong to the given controller */
				if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle))
					return -ENODEV;
			} else {
				struct acpi_device *adev;

				/* No controller given: look it up by handle */
				adev = acpi_fetch_acpi_dev(parent_handle);
				if (!adev)
					return -ENODEV;

				ctlr = acpi_spi_find_controller_by_adev(adev);
				if (!ctlr)
					return -EPROBE_DEFER;

				lookup->ctlr = ctlr;
			}

			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (ctlr->fw_translate_cs) {
				int cs = ctlr->fw_translate_cs(ctlr,
						sb->device_selection);
				if (cs < 0)
					return cs;
				lookup->chip_select = cs;
			} else {
				lookup->chip_select = sb->device_selection;
			}

			lookup->max_speed_hz = sb->connection_speed;
			lookup->bits_per_word = sb->data_bit_length;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				lookup->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				lookup->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				lookup->mode |= SPI_CS_HIGH;
		}
	} else if (lookup->irq < 0) {
		struct resource r;

		/* Remember the first interrupt resource encountered */
		if (acpi_dev_resource_interrupt(ares, 0, &r))
			lookup->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

/**
 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
 * @ctlr: controller to which the spi device belongs
 * @adev: ACPI Device for the spi device
 * @index: Index of the spi resource inside the ACPI Node
 *
 * This should be used to allocate a new SPI device from an ACPI
 * Device node.
 * The caller is responsible for calling spi_add_device to register the SPI device.
 *
 * If ctlr is set to NULL, the Controller for the SPI device will be looked up
 * using the resource.
 * If index is set to -1, index is not used.
 * Note: If index is -1, ctlr must be set.
 *
 * Return: a pointer to the new device, or ERR_PTR on error.
 */
struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
					 struct acpi_device *adev,
					 int index)
{
	acpi_handle parent_handle = NULL;
	struct list_head resource_list;
	struct acpi_spi_lookup lookup = {};
	struct spi_device *spi;
	int ret;

	/* Without a controller, a specific resource index is mandatory */
	if (!ctlr && index == -1)
		return ERR_PTR(-EINVAL);

	lookup.ctlr = ctlr;
	lookup.irq = -1;
	lookup.index = index;
	lookup.n = 0;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, &lookup);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0)
		/* Found SPI in _CRS but it points to another controller */
		return ERR_PTR(ret);

	if (!lookup.max_speed_hz &&
	    ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
	    device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
		/* Apple does not use _CRS but nested devices for SPI target devices */
		acpi_spi_parse_apple_properties(adev, &lookup);
	}

	/* No usable SPI description found anywhere */
	if (!lookup.max_speed_hz)
		return ERR_PTR(-ENODEV);

	spi = spi_alloc_device(lookup.ctlr);
	if (!spi) {
		dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return ERR_PTR(-ENOMEM);
	}

	spi_set_chipselect(spi, 0, lookup.chip_select);

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->max_speed_hz = lookup.max_speed_hz;
	spi->mode |= lookup.mode;
	spi->irq = lookup.irq;
	spi->bits_per_word = lookup.bits_per_word;
	/*
	 * By default spi->chip_select[0] will hold the physical CS number,
	 * so set bit 0 in spi->cs_index_mask.
	 */
	spi->cs_index_mask = BIT(0);

	return spi;
}
EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);

/*
 * acpi_register_spi_device - enumerate and register one ACPI-described
 * SPI device on @ctlr, unless it is absent or already enumerated
 */
static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
					    struct acpi_device *adev)
{
	struct spi_device *spi;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	spi = acpi_spi_device_alloc(ctlr, adev, -1);
	if (IS_ERR(spi)) {
		/* Only OOM aborts the namespace walk; other errors are skipped */
		if (PTR_ERR(spi) == -ENOMEM)
			return AE_NO_MEMORY;
		else
			return AE_OK;
	}

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	/*
	 * This gets re-tried in spi_probe() for -EPROBE_DEFER handling in case
	 * the GPIO controller does not have a driver yet. This needs to be done
	 * here too, because this call sets the GPIO direction and/or bias.
	 * Setting these needs to be done even if there is no driver, in which
	 * case spi_probe() will never get called.
	 * TODO: ideally the setup of the GPIO should be handled in a generic
	 * manner in the ACPI/gpiolib core code.
	 */
	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

/* acpi_walk_namespace() callback wrapping acpi_register_spi_device() */
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
	struct spi_controller *ctlr = data;

	if (!adev)
		return AE_OK;

	return acpi_register_spi_device(ctlr, adev);
}

#define SPI_ACPI_ENUMERATE_MAX_DEPTH		32

/*
 * acpi_register_spi_devices - walk the ACPI namespace and register every
 * SPI device that references @ctlr
 */
static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(ctlr->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
				     acpi_spi_add_device, NULL, ctlr, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&ctlr->dev, "failed to enumerate SPI target devices\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
#endif /* CONFIG_ACPI */

/* Release callback for the controller's struct device */
static void spi_controller_release(struct device *dev)
{
	struct spi_controller *ctlr;

	ctlr = container_of(dev, struct spi_controller, dev);

	free_percpu(ctlr->pcpu_statistics);
	kfree(ctlr);
}

static const struct class spi_controller_class = {
	.name		= "spi_master",
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_controller_groups,
};

#ifdef CONFIG_SPI_SLAVE
/**
 * spi_target_abort - abort the ongoing transfer request on an SPI target controller
 * @spi: device used for the current transfer
 */
int spi_target_abort(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;

	/* Only target-mode controllers that implement target_abort qualify */
	if (spi_controller_is_target(ctlr) && ctlr->target_abort)
		return ctlr->target_abort(ctlr);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_target_abort);

/*
 * sysfs "slave" attribute (show): emit the modalias of the registered
 * target device, or "(null)" if none is registered.
 */
static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct device *child;
	int ret;

	child = device_find_any_child(&ctlr->dev);
	/* A NULL modalias is rendered as "(null)" by the kernel printf */
	ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
	put_device(child);

	return ret;
}

/*
 * sysfs "slave" attribute (store): unregister any existing target device,
 * then register a new one with the written modalias (writing "(null)"
 * only unregisters).
 */
static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct spi_device *spi;
	struct device *child;
	char name[32];
	int rc;

	rc = sscanf(buf, "%31s", name);
	if (rc != 1 || !name[0])
		return -EINVAL;

	child = device_find_any_child(&ctlr->dev);
	if (child) {
		/* Remove registered target device */
		device_unregister(child);
		put_device(child);
	}

	if (strcmp(name, "(null)")) {
		/* Register new target device */
		spi = spi_alloc_device(ctlr);
		if (!spi)
			return -ENOMEM;

		strscpy(spi->modalias, name, sizeof(spi->modalias));

		rc = spi_add_device(spi);
		if (rc) {
			spi_dev_put(spi);
			return rc;
		}
	}

	return count;
}

static DEVICE_ATTR_RW(slave);

static struct attribute *spi_target_attrs[] = {
	&dev_attr_slave.attr,
	NULL,
};

static const struct attribute_group spi_target_group = {
	.attrs = spi_target_attrs,
};

static const struct attribute_group *spi_target_groups[] = {
	&spi_controller_statistics_group,
	&spi_target_group,
	NULL,
};

static const struct class spi_target_class = {
	.name		= "spi_slave",
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_target_groups,
};
#else
extern struct class spi_target_class;	/* dummy */
#endif

/**
 * __spi_alloc_controller - allocate an SPI host or target controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device, accessible
 *	with spi_controller_get_devdata(); the memory is cacheline aligned;
 *	drivers granting DMA access to portions of their private data need to
 *	round up @size using ALIGN(size, dma_get_cache_alignment()).
 * @target: flag indicating whether to allocate an SPI host (false) or SPI target (true)
 *	controller
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers. It's how they allocate
 * an spi_controller structure, prior to calling spi_register_controller().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing the
 * controller's methods before calling spi_register_controller(); and (after
 * errors adding the device) calling spi_controller_put() to prevent a memory
 * leak.
 *
 * Return: the SPI controller structure on success, else NULL.
3232 */ 3233 struct spi_controller *__spi_alloc_controller(struct device *dev, 3234 unsigned int size, bool target) 3235 { 3236 struct spi_controller *ctlr; 3237 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); 3238 3239 if (!dev) 3240 return NULL; 3241 3242 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL); 3243 if (!ctlr) 3244 return NULL; 3245 3246 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(); 3247 if (!ctlr->pcpu_statistics) { 3248 kfree(ctlr); 3249 return NULL; 3250 } 3251 3252 device_initialize(&ctlr->dev); 3253 INIT_LIST_HEAD(&ctlr->queue); 3254 spin_lock_init(&ctlr->queue_lock); 3255 spin_lock_init(&ctlr->bus_lock_spinlock); 3256 mutex_init(&ctlr->bus_lock_mutex); 3257 mutex_init(&ctlr->io_mutex); 3258 mutex_init(&ctlr->add_lock); 3259 ctlr->bus_num = -1; 3260 ctlr->num_chipselect = 1; 3261 ctlr->num_data_lanes = 1; 3262 ctlr->target = target; 3263 if (IS_ENABLED(CONFIG_SPI_SLAVE) && target) 3264 ctlr->dev.class = &spi_target_class; 3265 else 3266 ctlr->dev.class = &spi_controller_class; 3267 ctlr->dev.parent = dev; 3268 3269 device_set_node(&ctlr->dev, dev_fwnode(dev)); 3270 3271 pm_suspend_ignore_children(&ctlr->dev, true); 3272 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); 3273 3274 return ctlr; 3275 } 3276 EXPORT_SYMBOL_GPL(__spi_alloc_controller); 3277 3278 static void devm_spi_release_controller(void *ctlr) 3279 { 3280 spi_controller_put(ctlr); 3281 } 3282 3283 /** 3284 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller() 3285 * @dev: physical device of SPI controller 3286 * @size: how much zeroed driver-private data to allocate 3287 * @target: whether to allocate an SPI host (false) or SPI target (true) controller 3288 * Context: can sleep 3289 * 3290 * Allocate an SPI controller and automatically release a reference on it 3291 * when @dev is unbound from its driver. Drivers are thus relieved from 3292 * having to call spi_controller_put(). 
 *
 * The arguments to this function are identical to __spi_alloc_controller().
 *
 * Return: the SPI controller structure on success, else NULL.
 */
struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
						   unsigned int size,
						   bool target)
{
	struct spi_controller *ctlr;
	int ret;

	ctlr = __spi_alloc_controller(dev, size, target);
	if (!ctlr)
		return NULL;

	/* On failure the action fires immediately and frees the controller */
	ret = devm_add_action_or_reset(dev, devm_spi_release_controller, ctlr);
	if (ret)
		return NULL;

	/* Tells spi_unregister_controller() not to drop the last reference */
	ctlr->devm_allocated = true;

	return ctlr;
}
EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);

/**
 * spi_get_gpio_descs() - grab chip select GPIOs for the controller
 * @ctlr: The SPI controller to grab GPIO descriptors for
 *
 * Return: zero on success (including "no CS GPIOs at all"), else a
 * negative error code.
 */
static int spi_get_gpio_descs(struct spi_controller *ctlr)
{
	int nb, i;
	struct gpio_desc **cs;
	struct device *dev = &ctlr->dev;
	unsigned long native_cs_mask = 0;
	unsigned int num_cs_gpios = 0;

	nb = gpiod_count(dev, "cs");
	if (nb < 0) {
		/* No GPIOs at all is fine, else return the error */
		if (nb == -ENOENT)
			return 0;
		return nb;
	}

	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);

	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
			  GFP_KERNEL);
	if (!cs)
		return -ENOMEM;
	ctlr->cs_gpiods = cs;

	for (i = 0; i < nb; i++) {
		/*
		 * Most chipselects are active low, the inverted
		 * semantics are handled by special quirks in gpiolib,
		 * so initializing them GPIOD_OUT_LOW here means
		 * "unasserted", in most cases this will drive the physical
		 * line high.
		 */
		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
						      GPIOD_OUT_LOW);
		if (IS_ERR(cs[i]))
			return PTR_ERR(cs[i]);

		if (cs[i]) {
			/*
			 * If we find a CS GPIO, name it after the device and
			 * chip select line.
			 */
			char *gpioname;

			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
						  dev_name(dev), i);
			if (!gpioname)
				return -ENOMEM;
			gpiod_set_consumer_name(cs[i], gpioname);
			num_cs_gpios++;
			continue;
		}

		/* A slot without a GPIO must be a valid native chip select */
		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
			dev_err(dev, "Invalid native chip select %d\n", i);
			return -EINVAL;
		}
		native_cs_mask |= BIT(i);
	}

	ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;

	if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
	    ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
		dev_err(dev, "No unused native chip select available\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * Sanity-check the controller's callbacks before registration: a
 * controller must provide either SPI-memory ops or a transfer method.
 */
static int spi_controller_check_ops(struct spi_controller *ctlr)
{
	/*
	 * The controller may implement only the high-level SPI-memory like
	 * operations if it does not support regular SPI transfers, and this is
	 * a valid use case.
	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
	 * one of the ->transfer_xxx() methods is implemented.
	 */
	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		if (!ctlr->transfer && !ctlr->transfer_one &&
		    !ctlr->transfer_one_message) {
			return -EINVAL;
		}
	}

	return 0;
}

/* Allocate dynamic bus number using Linux idr */
static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
{
	int id;

	mutex_lock(&board_lock);
	id = idr_alloc(&spi_controller_idr, ctlr, start, end, GFP_KERNEL);
	mutex_unlock(&board_lock);
	/* Map -ENOSPC (no free ID in the requested range) to -EBUSY */
	if (WARN(id < 0, "couldn't get idr"))
		return id == -ENOSPC ? -EBUSY : id;
	ctlr->bus_num = id;
	return 0;
}

/**
 * spi_register_controller - register SPI host or target controller
 * @ctlr: initialized controller, originally from spi_alloc_host() or
 *	spi_alloc_target()
 * Context: can sleep
 *
 * SPI controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus. The final stage of probe() in that code
 * includes calling spi_register_controller() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers. Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.
 *
 * After a successful return, the caller is responsible for calling
 * spi_unregister_controller().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_controller(struct spi_controller *ctlr)
{
	struct device *dev = ctlr->dev.parent;
	struct boardinfo *bi;
	int first_dynamic;
	int status;
	int idx;

	if (!dev)
		return -ENODEV;

	/*
	 * Make sure all necessary hooks are implemented before registering
	 * the SPI controller.
	 */
	status = spi_controller_check_ops(ctlr);
	if (status)
		return status;

	if (ctlr->bus_num < 0)
		ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
	if (ctlr->bus_num >= 0) {
		/* Devices with a fixed bus num must check-in with the num */
		status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
		if (status)
			return status;
	}
	if (ctlr->bus_num < 0) {
		/* Dynamic IDs start above the highest "spi" DT alias */
		first_dynamic = of_alias_get_highest_id("spi");
		if (first_dynamic < 0)
			first_dynamic = 0;
		else
			first_dynamic++;

		status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
		if (status)
			return status;
	}
	ctlr->bus_lock_flag = 0;
	init_completion(&ctlr->xfer_completion);
	init_completion(&ctlr->cur_msg_completion);
	if (!ctlr->max_dma_len)
		ctlr->max_dma_len = INT_MAX;

	/*
	 * Register the device, then userspace will see it.
	 * Registration fails if the bus ID is in use.
	 */
	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);

	if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) {
		status = spi_get_gpio_descs(ctlr);
		if (status)
			goto free_bus_id;
		/*
		 * A controller using GPIO descriptors always
		 * supports SPI_CS_HIGH if need be.
		 */
		ctlr->mode_bits |= SPI_CS_HIGH;
	}

	/*
	 * Even if it's just one always-selected device, there must
	 * be at least one chipselect.
	 */
	if (!ctlr->num_chipselect) {
		status = -EINVAL;
		goto free_bus_id;
	}

	/* Setting last_cs to SPI_INVALID_CS means no chip selected */
	for (idx = 0; idx < SPI_DEVICE_CS_CNT_MAX; idx++)
		ctlr->last_cs[idx] = SPI_INVALID_CS;

	status = device_add(&ctlr->dev);
	if (status < 0)
		goto free_bus_id;
	dev_dbg(dev, "registered %s %s\n",
		spi_controller_is_target(ctlr) ? "target" : "host",
		dev_name(&ctlr->dev));

	/*
	 * If we're using a queued driver, start the queue. Note that we don't
	 * need the queueing logic if the driver is only supporting high-level
	 * memory operations.
	 */
	if (ctlr->transfer) {
		dev_info(dev, "controller is unqueued, this is deprecated\n");
	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
		status = spi_controller_initialize_queue(ctlr);
		if (status)
			goto del_ctrl;
	}

	mutex_lock(&board_lock);
	list_add_tail(&ctlr->list, &spi_controller_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(ctlr);
	acpi_register_spi_devices(ctlr);
	return status;

del_ctrl:
	device_del(&ctlr->dev);
free_bus_id:
	mutex_lock(&board_lock);
	idr_remove(&spi_controller_idr, ctlr->bus_num);
	mutex_unlock(&board_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);

/* devres action wrapper for spi_unregister_controller() */
static void devm_spi_unregister_controller(void *ctlr)
{
	spi_unregister_controller(ctlr);
}

/**
 * devm_spi_register_controller - register managed SPI host or target controller
 * @dev: device managing SPI controller
 * @ctlr: initialized controller, originally from spi_alloc_host() or
 *	spi_alloc_target()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_controller() which will
 * automatically be unregistered (and freed unless it has been allocated using
 * devm_spi_alloc_host/target()).
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_controller(struct device *dev,
				 struct spi_controller *ctlr)
{
	int ret;

	ret = spi_register_controller(ctlr);
	if (ret)
		return ret;

	/*
	 * Prevent controller from being freed by spi_unregister_controller()
	 * if devm_add_action_or_reset() fails for a non-devres allocated
	 * controller.
	 */
	spi_controller_get(ctlr);

	ret = devm_add_action_or_reset(dev, devm_spi_unregister_controller, ctlr);

	if (ret == 0 || ctlr->devm_allocated)
		spi_controller_put(ctlr);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_controller);

/* device_for_each_child() callback: tear down one child spi_device */
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_controller - unregister SPI host or target controller
 * @ctlr: the controller being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 *
 * Note that this function also drops a reference to the controller unless it
 * has been allocated using devm_spi_alloc_host/target().
 */
void spi_unregister_controller(struct spi_controller *ctlr)
{
	struct spi_controller *found;
	int id = ctlr->bus_num;

	/* Prevent addition of new devices, unregister existing ones */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
		mutex_lock(&ctlr->add_lock);

	device_for_each_child(&ctlr->dev, NULL, __unregister);

	/* First make sure that this controller was ever added */
	mutex_lock(&board_lock);
	found = idr_find(&spi_controller_idr, id);
	mutex_unlock(&board_lock);
	if (ctlr->queued) {
		if (spi_destroy_queue(ctlr))
			dev_err(&ctlr->dev, "queue remove failed\n");
	}
	mutex_lock(&board_lock);
	list_del(&ctlr->list);
	mutex_unlock(&board_lock);

	device_del(&ctlr->dev);

	/* Free bus id, but only if the IDR entry still refers to this ctlr */
	mutex_lock(&board_lock);
	if (found == ctlr)
		idr_remove(&spi_controller_idr, id);
	mutex_unlock(&board_lock);

	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
		mutex_unlock(&ctlr->add_lock);

	/*
	 * Release the last reference on the controller if its driver
	 * has not yet been converted to devm_spi_alloc_host/target().
	 */
	if (!ctlr->devm_allocated)
		put_device(&ctlr->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);

/* Return -ESHUTDOWN if the controller is marked suspended, else 0 */
static inline int __spi_check_suspended(const struct spi_controller *ctlr)
{
	return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
}

/* Set the SUSPENDED flag under the bus lock mutex */
static inline void __spi_mark_suspended(struct spi_controller *ctlr)
{
	mutex_lock(&ctlr->bus_lock_mutex);
	ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
	mutex_unlock(&ctlr->bus_lock_mutex);
}

/* Clear the SUSPENDED flag under the bus lock mutex */
static inline void __spi_mark_resumed(struct spi_controller *ctlr)
{
	mutex_lock(&ctlr->bus_lock_mutex);
	ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
	mutex_unlock(&ctlr->bus_lock_mutex);
}

/* Stop the message queue (if any) and mark the controller suspended */
int spi_controller_suspend(struct spi_controller *ctlr)
{
	int ret = 0;

	/* Basically no-ops for non-queued controllers */
	if (ctlr->queued) {
		ret = spi_stop_queue(ctlr);
		if (ret)
			dev_err(&ctlr->dev, "queue stop failed\n");
	}

	__spi_mark_suspended(ctlr);
	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_suspend);

/* Clear the suspended mark and restart the message queue (if any) */
int spi_controller_resume(struct spi_controller *ctlr)
{
	int ret = 0;

	__spi_mark_resumed(ctlr);

	if (ctlr->queued) {
		ret = spi_start_queue(ctlr);
		if (ret)
			dev_err(&ctlr->dev, "queue restart failed\n");
	}
	return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_resume);

/*-------------------------------------------------------------------------*/

/* Core methods for spi_message alterations */

/*
 * spi_res release callback for spi_replace_transfers(): undo the
 * replacement by splicing the original transfers back into the message
 * and unlinking the inserted ones.
 */
static void __spi_replace_transfers_release(struct spi_controller *ctlr,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* Call extra callback if requested */
	if (rxfer->release)
		rxfer->release(ctlr, msg, res);

	/* Insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* Remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}

/**
 * spi_replace_transfers - replace
 *	transfers with several transfers
 * and register change with spi_message.resources
 * @msg: the spi_message we work upon
 * @xfer_first: the first spi_transfer we want to replace
 * @remove: number of transfers to remove
 * @insert: the number of transfers we want to insert instead
 * @release: extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp: gfp flags
 *
 * Returns: pointer to @spi_replaced_transfers,
 *          PTR_ERR(...) in case of errors.
 */
static struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* Allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      struct_size(rxfer, inserted_transfers, insert)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* The release code to invoke before running the generic release */
	rxfer->release = release;

	/* Assign extradata: it follows the inserted transfers in the blob */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* Init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/*
	 * Assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* Remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/*
		 * If the entry after replaced_after is msg->transfers
		 * then we have been requested to remove more transfers
		 * than are in the list.
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* Insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* Free the spi_replace_transfer structure... */
			spi_res_free(rxfer);

			/* ...and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/*
		 * Remove the entry after replaced_after from list of
		 * transfers and add it to list of replaced_transfers.
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/*
	 * Create copy of the given xfer with identical settings
	 * based on the first transfer to get removed.
	 */
	for (i = 0; i < insert; i++) {
		/* We need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* Copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* Add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* Clear cs_change and delay for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay.value = 0;
		}
	}

	/* Set up inserted... */
	rxfer->inserted = insert;

	/* ...and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}

/*
 * Replace one transfer that exceeds @maxsize with several <= @maxsize
 * chunks; on return *xferp points at the last inserted chunk so the
 * caller's list walk skips the freshly split transfers.
 */
static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* Calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* Create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/*
	 * Now handle each of those newly inserted spi_transfers.
	 * Note that the replacements spi_transfers all are preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others)
	 * so we just have to fix up len and the pointers.
	 */

	/*
	 * The first transfer just needs the length modified, so we
	 * run it outside the loop.
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* All the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* Update rx_buf, tx_buf and DMA */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;

		/* Update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/*
	 * We set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers.
	 */
	*xferp = &xfers[count - 1];

	/* Increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
				       transfers_split_maxsize);

	return 0;
}

/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr: the @spi_controller for this transfer
 * @msg: the @spi_message to transform
 * @maxsize: the maximum when to apply this
 *
 * This function allocates resources that are automatically freed during the
 * spi message unoptimize phase so this function should only be called from
 * optimize_message callbacks.
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
				struct spi_message *msg,
				size_t maxsize)
{
	struct spi_transfer *xfer;
	int ret;

	/*
	 * Iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened).
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);


/**
 * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
 *                                when an individual transfer exceeds a
 *                                certain number of SPI words
 * @ctlr: the @spi_controller for this transfer
 * @msg: the @spi_message to transform
 * @maxwords: the number of words to limit each transfer to
 *
 * This function allocates resources that are automatically freed during the
 * spi message unoptimize phase so this function should only be called from
 * optimize_message callbacks.
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxwords(struct spi_controller *ctlr,
				 struct spi_message *msg,
				 size_t maxwords)
{
	struct spi_transfer *xfer;

	/*
	 * Iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened).
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		size_t maxsize;
		int ret;

		/* Convert the word limit into a byte limit per this
		 * transfer's word size.
		 */
		maxsize = maxwords * spi_bpw_to_bytes(xfer->bits_per_word);
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);

/*-------------------------------------------------------------------------*/

/*
 * Core methods for SPI controller protocol drivers. Some of the
 * other core methods are currently defined as inline functions.
4006 */ 4007 4008 static int __spi_validate_bits_per_word(struct spi_controller *ctlr, 4009 u8 bits_per_word) 4010 { 4011 if (ctlr->bits_per_word_mask) { 4012 /* Only 32 bits fit in the mask */ 4013 if (bits_per_word > 32) 4014 return -EINVAL; 4015 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) 4016 return -EINVAL; 4017 } 4018 4019 return 0; 4020 } 4021 4022 /** 4023 * spi_set_cs_timing - configure CS setup, hold, and inactive delays 4024 * @spi: the device that requires specific CS timing configuration 4025 * 4026 * Return: zero on success, else a negative error code. 4027 */ 4028 static int spi_set_cs_timing(struct spi_device *spi) 4029 { 4030 struct device *parent = spi->controller->dev.parent; 4031 int status = 0; 4032 4033 if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) { 4034 if (spi->controller->auto_runtime_pm) { 4035 status = pm_runtime_get_sync(parent); 4036 if (status < 0) { 4037 pm_runtime_put_noidle(parent); 4038 dev_err(&spi->controller->dev, "Failed to power device: %d\n", 4039 status); 4040 return status; 4041 } 4042 4043 status = spi->controller->set_cs_timing(spi); 4044 pm_runtime_put_autosuspend(parent); 4045 } else { 4046 status = spi->controller->set_cs_timing(spi); 4047 } 4048 } 4049 return status; 4050 } 4051 4052 /** 4053 * spi_setup - setup SPI mode and clock rate 4054 * @spi: the device whose settings are being modified 4055 * Context: can sleep, and no requests are queued to the device 4056 * 4057 * SPI protocol drivers may need to update the transfer mode if the 4058 * device doesn't work with its default. They may likewise need 4059 * to update clock rates or word sizes from initial values. This function 4060 * changes those settings, and must be called from a context that can sleep. 4061 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 4062 * effect the next time the device is selected and data is transferred to 4063 * or from it. 
 * When this function returns, the SPI device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support. For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status;

	/*
	 * Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO
	 * are set at the same time.
	 */
	if ((hweight_long(spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
	    (hweight_long(spi->mode &
		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
		dev_err(&spi->dev,
			"setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
		return -EINVAL;
	}
	/* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
		return -EINVAL;
	/* Check against conflicting MOSI idle configuration */
	if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) {
		dev_err(&spi->dev,
			"setup: MOSI configured to idle low and high at the same time.\n");
		return -EINVAL;
	}
	/*
	 * Help drivers fail *cleanly* when they need options
	 * that aren't supported with their current controller.
	 * SPI_CS_WORD has a fallback software implementation,
	 * so it is ignored here.
	 */
	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
				 SPI_NO_TX | SPI_NO_RX);
	/* Unsupported dual/quad/octal bits are dropped with only a warning */
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word) {
		spi->bits_per_word = 8;
	} else {
		/*
		 * Some controllers may not support the default 8 bits-per-word
		 * so only perform the check when this is explicitly provided.
		 */
		status = __spi_validate_bits_per_word(spi->controller,
						      spi->bits_per_word);
		if (status)
			return status;
	}

	/* Clamp the device speed to the controller's maximum */
	if (spi->controller->max_speed_hz &&
	    (!spi->max_speed_hz ||
	     spi->max_speed_hz > spi->controller->max_speed_hz))
		spi->max_speed_hz = spi->controller->max_speed_hz;

	mutex_lock(&spi->controller->io_mutex);

	if (spi->controller->setup) {
		status = spi->controller->setup(spi);
		if (status) {
			mutex_unlock(&spi->controller->io_mutex);
			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
				status);
			return status;
		}
	}

	status = spi_set_cs_timing(spi);
	if (status) {
		mutex_unlock(&spi->controller->io_mutex);
		return status;
	}

	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
		if (status < 0) {
			mutex_unlock(&spi->controller->io_mutex);
			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
				status);
			return status;
		}

		/*
		 * We do not want to return positive value from pm_runtime_get,
		 * there are many instances of devices calling spi_setup() and
		 * checking for a non-zero return value instead of a negative
		 * return value.
		 */
		status = 0;

		/* Deselect the device (honoring SPI_CS_HIGH immediately) */
		spi_set_cs(spi, false, true);
		pm_runtime_put_autosuspend(spi->controller->dev.parent);
	} else {
		spi_set_cs(spi, false, true);
	}

	mutex_unlock(&spi->controller->io_mutex);

	if (spi->rt && !spi->controller->rt) {
		spi->controller->rt = true;
		spi_set_thread_rt(spi->controller);
	}

	trace_spi_setup(spi, status);

	dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
		spi->mode & SPI_MODE_X_MASK,
		(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
		(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
		(spi->mode & SPI_3WIRE) ? "3wire, " : "",
		(spi->mode & SPI_LOOP) ? "loopback, " : "",
		spi->bits_per_word, spi->max_speed_hz,
		status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);

/*
 * Ensure the transfer's word delay is at least as long as the device's:
 * compare both delays in nanoseconds and copy the device's delay into the
 * transfer when the device's is larger.
 */
static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
				       struct spi_device *spi)
{
	int delay1, delay2;

	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
	if (delay1 < 0)
		return delay1;

	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
	if (delay2 < 0)
		return delay2;

	if (delay1 < delay2)
		memcpy(&xfer->word_delay, &spi->word_delay,
		       sizeof(xfer->word_delay));

	return 0;
}

/* Validate a message and fill in per-transfer defaults before queuing */
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	message->spi = spi;

	/*
	 * Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing. They can also be caused by
	 * software limitations.
	 */
	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
	    (spi->mode & SPI_3WIRE)) {
		unsigned flags = ctlr->flags;

		/* Half-duplex: no transfer may have both buffers set */
		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 * Ensure transfer word_delay is at least as long as that required by
	 * device itself.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		xfer->effective_speed_hz = 0;
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/* DDR mode is supported only if controller has dtr_caps=true.
		 * default considered as SDR mode for SPI and QSPI controller.
		 * Note: This is applicable only to QSPI controller.
		 */
		if (xfer->dtr_mode && !ctlr->dtr_caps)
			return -EINVAL;

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple.
		 */
		w_size = spi_bpw_to_bytes(xfer->bits_per_word);

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/*
		 * Check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (spi->mode & SPI_NO_TX)
				return -EINVAL;
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD &&
			    xfer->tx_nbits != SPI_NBITS_OCTAL)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_OCTAL) &&
				!(spi->mode & SPI_TX_OCTAL))
				return -EINVAL;
		}
		/* Check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (spi->mode & SPI_NO_RX)
				return -EINVAL;
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD &&
			    xfer->rx_nbits != SPI_NBITS_OCTAL)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_OCTAL) &&
				!(spi->mode & SPI_RX_OCTAL))
				return -EINVAL;
		}

		if (_spi_xfer_word_delay_update(xfer, spi))
			return -EINVAL;

		/* Make sure controller supports required offload features. */
		if (xfer->offload_flags) {
			if (!message->offload)
				return -EINVAL;

			if (xfer->offload_flags & ~message->offload->xfer_flags)
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

/*
 * spi_split_transfers - generic handling of transfer splitting
 * @msg: the message to split
 *
 * Under certain conditions, a SPI controller may not support arbitrary
 * transfer sizes or other features required by a peripheral. This function
 * will split the transfers in the message into smaller transfers that are
 * supported by the controller.
 *
 * Controllers with special requirements not covered here can also split
 * transfers in the optimize_message() callback.
 *
 * Context: can sleep
 * Return: zero on success, else a negative error code
 */
static int spi_split_transfers(struct spi_message *msg)
{
	struct spi_controller *ctlr = msg->spi->controller;
	struct spi_transfer *xfer;
	int ret;

	/*
	 * If an SPI controller does not support toggling the CS line on each
	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
	 * for the CS line, we can emulate the CS-per-word hardware function by
	 * splitting transfers into one-word transfers and ensuring that
	 * cs_change is set for each transfer.
	 */
	if ((msg->spi->mode & SPI_CS_WORD) &&
	    (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
		ret = spi_split_transfers_maxwords(ctlr, msg, 1);
		if (ret)
			return ret;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			/* Don't change cs_change on the last entry in the list */
			if (list_is_last(&xfer->transfer_list, &msg->transfers))
				break;

			xfer->cs_change = 1;
		}
	} else {
		ret = spi_split_transfers_maxsize(ctlr, msg,
						  spi_max_transfer_size(msg->spi));
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * __spi_optimize_message - shared implementation for spi_optimize_message()
 *                          and spi_maybe_optimize_message()
 * @spi: the device that will be used for the message
 * @msg: the message to optimize
 *
 * Peripheral drivers will call spi_optimize_message() and the spi core will
 * call spi_maybe_optimize_message() instead of calling this directly.
 *
 * It is not valid to call this on a message that has already been optimized.
 *
 * Return: zero on success, else a negative error code
 */
static int __spi_optimize_message(struct spi_device *spi,
				  struct spi_message *msg)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;

	ret = __spi_validate(spi, msg);
	if (ret)
		return ret;

	ret = spi_split_transfers(msg);
	if (ret)
		return ret;

	if (ctlr->optimize_message) {
		ret = ctlr->optimize_message(msg);
		if (ret) {
			/* Undo any resource allocation done by the split above */
			spi_res_release(ctlr, msg);
			return ret;
		}
	}

	msg->optimized = true;

	return 0;
}

/*
 * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
 * @spi: the device that will be used for the message
 * @msg: the message to optimize
 * Return: zero on success, else a negative error code
 */
static int spi_maybe_optimize_message(struct spi_device *spi,
				      struct spi_message *msg)
{
	if (spi->controller->defer_optimize_message) {
		msg->spi = spi;
		return 0;
	}

	/* Already optimized by the peripheral driver - nothing to do */
	if (msg->pre_optimized)
		return 0;

	return __spi_optimize_message(spi, msg);
}

/**
 * spi_optimize_message - do any one-time validation and setup for a SPI message
 * @spi: the device that will be used for the message
 * @msg: the message to optimize
 *
 * Peripheral drivers that reuse the same message repeatedly may call this to
 * perform as much message prep as possible once, rather than repeating it each
 * time a message transfer is performed to improve throughput and reduce CPU
 * usage.
 *
 * Once a message has been optimized, it cannot be modified with the exception
 * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
 * only the data in the memory it points to).
 *
 * Calls to this function must be balanced with calls to spi_unoptimize_message()
 * to avoid leaking resources.
 *
 * Context: can sleep
 * Return: zero on success, else a negative error code
 */
int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
{
	int ret;

	/*
	 * Pre-optimization is not supported and optimization is deferred e.g.
	 * when using spi-mux.
	 */
	if (spi->controller->defer_optimize_message)
		return 0;

	ret = __spi_optimize_message(spi, msg);
	if (ret)
		return ret;

	/*
	 * This flag indicates that the peripheral driver called spi_optimize_message()
	 * and therefore we shouldn't unoptimize message automatically when finalizing
	 * the message but rather wait until spi_unoptimize_message() is called
	 * by the peripheral driver.
	 */
	msg->pre_optimized = true;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_optimize_message);

/**
 * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
 * @msg: the message to unoptimize
 *
 * Calls to this function must be balanced with calls to spi_optimize_message().
 *
 * Context: can sleep
 */
void spi_unoptimize_message(struct spi_message *msg)
{
	if (msg->spi->controller->defer_optimize_message)
		return;

	__spi_unoptimize_message(msg);
	msg->pre_optimized = false;
}
EXPORT_SYMBOL_GPL(spi_unoptimize_message);

/*
 * Submit a message to the controller without waiting for completion.
 * Callers hold ctlr->bus_lock_spinlock (see spi_async() and __spi_sync()).
 */
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;

	/*
	 * Some controllers do not support doing regular SPI transfers. Return
	 * ENOTSUPP when this is the case.
	 */
	if (!ctlr->transfer)
		return -ENOTSUPP;

	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);

	trace_spi_message_submit(message);

	if (!ctlr->ptp_sts_supported) {
		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	return ctlr->transfer(spi, message);
}

/* devm action callback wrapping spi_unoptimize_message() */
static void devm_spi_unoptimize_message(void *msg)
{
	spi_unoptimize_message(msg);
}

/**
 * devm_spi_optimize_message - managed version of spi_optimize_message()
 * @dev: the device that manages @msg (usually @spi->dev)
 * @spi: the device that will be used for the message
 * @msg: the message to optimize
 * Return: zero on success, else a negative error code
 *
 * spi_unoptimize_message() will automatically be called when the device is
 * removed.
 */
int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
			      struct spi_message *msg)
{
	int ret;

	ret = spi_optimize_message(spi, msg);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg);
}
EXPORT_SYMBOL_GPL(devm_spi_optimize_message);

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (IRQs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = spi_maybe_optimize_message(spi, message);
	if (ret)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	/* Reject async traffic while the bus is held by spi_bus_lock() */
	if (ctlr->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);

/* Transfer @msg directly under the io_mutex, bypassing the message queue */
static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
{
	bool was_busy;
	int ret;

	mutex_lock(&ctlr->io_mutex);

	was_busy = ctlr->busy;

	ctlr->cur_msg = msg;
	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	if (ret)
		dev_err(&ctlr->dev, "noqueue transfer failed\n");
	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	if (!was_busy) {
		/* Controller was idle before this message: release the dummy
		 * buffers, unprepare the hardware and let it go idle again.
		 */
		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
	}

	mutex_unlock(&ctlr->io_mutex);
}

/*-------------------------------------------------------------------------*/

/*
 * Utility methods for SPI protocol drivers, layered on
 * top of the core. Some other utility methods are defined as
 * inline functions.
 */

/* Completion callback used by __spi_sync() to wake the waiting task */
static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;
	int status;
	struct spi_controller *ctlr = spi->controller;

	if (__spi_check_suspended(ctlr)) {
		dev_warn_once(&spi->dev, "Attempted to sync while suspend\n");
		return -ESHUTDOWN;
	}

	status = spi_maybe_optimize_message(spi, message);
	if (status)
		return status;

	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);

	/*
	 * Checking queue_empty here only guarantees async/sync message
	 * ordering when coming from the same context. It does not need to
	 * guard against reentrancy from a different context. The io_mutex
	 * will catch those cases.
	 */
	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
		message->actual_length = 0;
		message->status = -EINPROGRESS;

		trace_spi_message_submit(message);

		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);

		__spi_transfer_message_noqueue(ctlr, message);

		return message->status;
	}

	/*
	 * There are messages in the async queue that could have originated
	 * from the same context, so we need to preserve ordering.
	 * Therefore we send the message to the async queue and wait until they
	 * are completed.
	 */
	message->complete = spi_complete;
	message->context = &done;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	status = __spi_async(spi, message);
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->complete = NULL;
	message->context = NULL;

	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus controller that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	/* bus_lock_flag makes spi_async() return -EBUSY while we hold the bus */
	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* Mutex remains locked until spi_bus_unlock() is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus controller that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);

/* Portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

/* Shared pre-allocated DMA-safe bounce buffer for spi_write_then_read() */
static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be DMA-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be DMA-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with DMA-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/*
	 * Use preallocated DMA-safe buffer if we can. We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* Do the I/O */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	/* Drop the trylock only if we used the shared static buffer */
	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF)
/* The spi controllers are not using spi_bus, so we find it with another way */
struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device_by_of_node(&spi_controller_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_target_class, node);
	if (!dev)
		return NULL;

	/* Reference got in class_find_device */
	return container_of(dev, struct spi_controller, dev);
}
EXPORT_SYMBOL_GPL(of_find_spi_controller_by_node);
#endif

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* Must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}

/* OF reconfig notifier: register/unregister SPI devices as DT nodes come and go */
static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* Not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		/*
		 * Clear the flag before adding the device so that fw_devlink
		 * doesn't skip adding consumers to this device.
		 */
		fwnode_clear_flag(&rd->dn->fwnode, FWNODE_FLAG_NOT_DEVICE);
		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
			       __func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* Already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* Find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* No? not meant for us */

		/* Unregister takes one ref away */
		spi_unregister_device(spi);

		/* And put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
/* Match a controller class device whose parent owns the given ACPI node */
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return device_match_acpi_dev(dev->parent, data);
}

struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_controller_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_target_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}
EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return to_spi_device(dev);
}

/* ACPI reconfig notifier: counterpart of of_spi_notify() for ACPI hotplug */
static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_controller_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_target_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_controller_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/*
 * A board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... Costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);