Lines matching "ignore-power-on-sel" in drivers/firewire/ohci.c (free-text search; the hyphenated query is tokenized as ignore, power, on, sel)
1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
13 #include <linux/dma-mapping.h>
15 #include <linux/firewire-constants.h>
44 #define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
45 #define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
46 #define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args)
106 * A buffer that contains a block of DMA-able coherent memory used for
126 * List of page-sized buffers for storing DMA descriptors.
282 // On PCI Express Root Complex in any type of AMD Ryzen machine, VIA VT6306/6307/6308 with Asmedia
293 return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ); in has_reboot_by_cycle_timer_read_quirk()
307 if (pdev->vendor != PCI_VENDOR_ID_VIA) in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
309 if (pdev->device != PCI_DEVICE_ID_VIA_VT630X) in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
313 pcie_to_pci_bridge = pdev->bus->self; in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
314 if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA) in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
316 if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X) in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
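Note: the matches above are the early-return chain of detect_vt630x_with_asm1083_on_amd_ryzen_machine(); the intervening lines did not match the query. As a minimal sketch of the whole check, the logic can be modeled in plain C as below. The struct and the ID values are stand-ins for struct pci_dev and the kernel's PCI ID constants, not taken from the listing.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for struct pci_dev and the PCI ID constants. */
struct dev {
	uint16_t vendor;
	uint16_t device;
	struct dev *parent;	/* models pdev->bus->self */
};

#define VENDOR_VIA	0x1106
#define DEVICE_VT630X	0x3044	/* placeholder for PCI_DEVICE_ID_VIA_VT630X */
#define VENDOR_ASMEDIA	0x1b21
#define DEVICE_ASM108X	0x1080	/* placeholder for PCI_DEVICE_ID_ASMEDIA_ASM108X */

/* Mirror of the match order above: a VIA VT6306/6307/6308 FireWire chip
 * sitting behind an ASMedia ASM108x PCIe-to-PCI bridge. Each test bails
 * out early, like the kernel code. */
static bool is_vt630x_behind_asm108x(const struct dev *pdev)
{
	const struct dev *bridge;

	if (pdev->vendor != VENDOR_VIA)
		return false;
	if (pdev->device != DEVICE_VT630X)
		return false;
	bridge = pdev->parent;
	if (!bridge || bridge->vendor != VENDOR_ASMEDIA)
		return false;
	if (bridge->device != DEVICE_ASM108X)
		return false;
	return true;
}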
402 ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
405 ", or a combination, or all = -1)");
449 static const char *power[] = { variable
451 [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
453 static const char port[] = { '.', '-', 'p', 'c', };
468 self_id_count, generation, ohci->node_id); in log_selfids()
470 for (s = ohci->self_id_buffer; self_id_count--; ++s) in log_selfids()
476 power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "", in log_selfids()
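Note: log_selfids() decodes each self-ID quadlet with these lookup tables: bits 8-10 select the power class string and bit 22 is the link-active flag printed as "L" (line 476). A minimal decoder using the same two fields; the first four power-class strings are reconstructed from the IEEE 1394 power classes, since only entries 4-7 appear in the listing.

#include <stdint.h>
#include <stdio.h>

static const char *const power[] = {
	"+0W", "+15W", "+30W", "+45W",		/* reconstructed entries 0..3 */
	"-3W", " ?W", "-3..-6W", "-3..-10W",	/* entries 4..7 from line 451 */
};

static void decode_self_id(uint32_t s)
{
	printf("power class %s%s\n",
	       power[(s >> 8) & 7],			/* bits 8..10 */
	       (s >> 22) & 1 ? ", link active (L)" : "");	/* bit 22 */
}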
487 [0x00] = "evt_no_status", [0x01] = "-reserved-",
493 [0x0c] = "-reserved-", [0x0d] = "-reserved-",
495 [0x10] = "-reserved-", [0x11] = "ack_complete",
496 [0x12] = "ack_pending ", [0x13] = "-reserved-",
498 [0x16] = "ack_busy_B", [0x17] = "-reserved-",
499 [0x18] = "-reserved-", [0x19] = "-reserved-",
500 [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
501 [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
502 [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
507 [0x2] = "W resp", [0x3] = "-reserved-",
512 [0xc] = "-reserved-", [0xd] = "-reserved-",
513 [0xe] = "link internal", [0xf] = "-reserved-",
558 "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n", in log_ar_at_event()
565 "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n", in log_ar_at_event()
574 writel(data, ohci->registers + offset); in reg_write()
579 return readl(ohci->registers + offset); in reg_read()
590 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
603 return -ENODEV; /* Card was ejected. */ in read_phy_reg()
618 return -EBUSY; in read_phy_reg()
630 return -ENODEV; /* Card was ejected. */ in write_phy_reg()
641 return -EBUSY; in write_phy_reg()
677 mutex_lock(&ohci->phy_reg_mutex); in ohci_read_phy_reg()
679 mutex_unlock(&ohci->phy_reg_mutex); in ohci_read_phy_reg()
690 mutex_lock(&ohci->phy_reg_mutex); in ohci_update_phy_reg()
692 mutex_unlock(&ohci->phy_reg_mutex); in ohci_update_phy_reg()
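Note: the pattern in the matches above is two-layered. read_phy_reg()/write_phy_reg() poll the controller and fail with -EBUSY after a bounded number of attempts, or -ENODEV when the register reads all ones (card ejected); ohci_read_phy_reg()/ohci_update_phy_reg() wrap them in phy_reg_mutex. A condensed user-space model of the read path; the completion bit and retry budget below are illustrative, not the real OHCI1394_PhyControl layout.

#include <errno.h>
#include <stdint.h>

#define MAX_RETRIES 100		/* illustrative poll budget */

/* Assumed hardware access; stands in for reg_read(ohci, OHCI1394_PhyControl). */
extern uint32_t phy_control_read(void);

static int read_phy_reg_model(uint32_t *out)
{
	for (int i = 0; i < MAX_RETRIES; i++) {
		uint32_t val = phy_control_read();

		if (val == ~0u)
			return -ENODEV;		/* card was ejected */
		if (val & (1u << 31)) {		/* "read done" flag, illustrative */
			*out = val & 0xff;
			return 0;
		}
	}
	return -EBUSY;				/* poll budget exhausted */
}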
699 return page_private(ctx->pages[i]); in ar_buffer_bus()
706 d = &ctx->descriptors[index]; in ar_context_link_page()
707 d->branch_address &= cpu_to_le32(~0xf); in ar_context_link_page()
708 d->res_count = cpu_to_le16(PAGE_SIZE); in ar_context_link_page()
709 d->transfer_status = 0; in ar_context_link_page()
712 d = &ctx->descriptors[ctx->last_buffer_index]; in ar_context_link_page()
713 d->branch_address |= cpu_to_le32(1); in ar_context_link_page()
715 ctx->last_buffer_index = index; in ar_context_link_page()
717 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); in ar_context_link_page()
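Note: ar_context_link_page() recycles a consumed page: it clears the Z bits in the page's own branch_address (so the controller will stop there), resets res_count to a full page, then sets Z=1 in the old tail's branch so the ring grows by one buffer before the CONTEXT_WAKE write. A self-contained model of the relink; field widths are simplified, the real descriptor uses little-endian fields via cpu_to_le16/32.

#include <stdint.h>

#define AR_BUFFERS 4		/* illustrative; the driver's value isn't shown here */
#define PAGE_SIZE 4096

struct descriptor {
	uint16_t req_count;
	uint16_t res_count;
	uint16_t transfer_status;
	uint32_t branch_address;	/* low 4 bits: Z (descriptor count) */
};

struct ar_ring {
	struct descriptor desc[AR_BUFFERS];
	unsigned int last_buffer_index;
};

static void link_page(struct ar_ring *r, unsigned int index)
{
	struct descriptor *d = &r->desc[index];

	d->branch_address &= ~0xfu;	/* unlink: Z = 0, controller stops here */
	d->res_count = PAGE_SIZE;	/* whole page free again */
	d->transfer_status = 0;

	r->desc[r->last_buffer_index].branch_address |= 1;	/* old tail: Z = 1 */
	r->last_buffer_index = index;
	/* the driver then writes CONTEXT_WAKE so the controller re-fetches */
}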
722 struct device *dev = ctx->ohci->card.device; in ar_context_release()
725 if (!ctx->buffer) in ar_context_release()
728 vunmap(ctx->buffer); in ar_context_release()
731 if (ctx->pages[i]) in ar_context_release()
732 dma_free_pages(dev, PAGE_SIZE, ctx->pages[i], in ar_context_release()
739 struct fw_ohci *ohci = ctx->ohci; in ar_context_abort()
741 if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) { in ar_context_abort()
742 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); in ar_context_abort()
757 return ar_next_buffer_index(ctx->last_buffer_index); in ar_first_buffer_index()
767 unsigned int i, next_i, last = ctx->last_buffer_index; in ar_search_last_active_buffer()
771 res_count = READ_ONCE(ctx->descriptors[i].res_count); in ar_search_last_active_buffer()
779 next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); in ar_search_last_active_buffer()
795 next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); in ar_search_last_active_buffer()
810 *buffer_offset = PAGE_SIZE - le16_to_cpu(res_count); in ar_search_last_active_buffer()
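Note: res_count is a countdown. The controller seeds it with req_count (a full page here) and decrements it as data is stored, so the valid span of a buffer is the difference (line 810); the same arithmetic appears later in handle_ir_buffer_fill() as req_count - res_count (line 2863). As a one-liner:

#include <stdint.h>

#define PAGE_SIZE 4096

/* Bytes the controller has actually written into a page-sized AR buffer. */
static unsigned int bytes_filled(uint16_t res_count)
{
	return PAGE_SIZE - res_count;
}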
827 dma_sync_single_for_cpu(ctx->ohci->card.device, in ar_sync_buffers_for_cpu()
833 dma_sync_single_for_cpu(ctx->ohci->card.device, in ar_sync_buffers_for_cpu()
840 (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
847 struct fw_ohci *ohci = ctx->ohci; in handle_ar_packet()
903 p.ack = evt - 16; in handle_ar_packet()
906 p.generation = ohci->request_generation; in handle_ar_packet()
932 if (!(ohci->quirks & QUIRK_RESET_PACKET)) in handle_ar_packet()
933 ohci->request_generation = (p.header[2] >> 16) & 0xff; in handle_ar_packet()
934 } else if (ctx == &ohci->ar_request_ctx) { in handle_ar_packet()
935 fw_core_handle_request(&ohci->card, &p); in handle_ar_packet()
937 fw_core_handle_response(&ohci->card, &p); in handle_ar_packet()
963 dma_sync_single_for_device(ctx->ohci->card.device, in ar_recycle_buffers()
977 p = ctx->pointer; in ar_context_tasklet()
984 end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset; in ar_context_tasklet()
993 void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE; in ar_context_tasklet()
998 p -= AR_BUFFERS * PAGE_SIZE; in ar_context_tasklet()
1008 ctx->pointer = p; in ar_context_tasklet()
1014 ctx->pointer = NULL; in ar_context_tasklet()
1020 struct device *dev = ohci->card.device; in ar_context_init()
1026 ctx->regs = regs; in ar_context_init()
1027 ctx->ohci = ohci; in ar_context_init()
1028 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); in ar_context_init()
1031 ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr, in ar_context_init()
1033 if (!ctx->pages[i]) in ar_context_init()
1035 set_page_private(ctx->pages[i], dma_addr); in ar_context_init()
1041 pages[i] = ctx->pages[i]; in ar_context_init()
1043 pages[AR_BUFFERS + i] = ctx->pages[i]; in ar_context_init()
1044 ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL); in ar_context_init()
1045 if (!ctx->buffer) in ar_context_init()
1048 ctx->descriptors = ohci->misc_buffer + descriptors_offset; in ar_context_init()
1049 ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset; in ar_context_init()
1052 d = &ctx->descriptors[i]; in ar_context_init()
1053 d->req_count = cpu_to_le16(PAGE_SIZE); in ar_context_init()
1054 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | in ar_context_init()
1057 d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i)); in ar_context_init()
1058 d->branch_address = cpu_to_le32(ctx->descriptors_bus + in ar_context_init()
1067 return -ENOMEM; in ar_context_init()
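Note: ar_context_init() maps the same AR pages twice back to back (pages[AR_BUFFERS + i] = ctx->pages[i] at line 1043 feeds one vmap() call), so a packet spanning the last and first page is virtually contiguous and ar_context_tasklet() can simply rewind its pointer with p -= AR_BUFFERS * PAGE_SIZE (lines 993-998). A user-space analog of the same double-mapping trick, sketched with memfd_create(); sizes and names here are ours, and error handling is minimal.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t size = 4096 * 4;			/* four "AR buffer" pages */
	int fd = memfd_create("ring", 0);

	if (fd < 0 || ftruncate(fd, size) < 0)
		return 1;

	/* Reserve twice the size, then map the same file into both halves. */
	char *buf = mmap(NULL, 2 * size, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	mmap(buf, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
	mmap(buf + size, size, PROT_READ | PROT_WRITE,
	     MAP_SHARED | MAP_FIXED, fd, 0);

	/* A write that wraps past the end reads back as one contiguous run,
	 * which is exactly what the AR tasklet relies on. */
	strcpy(buf + size - 3, "wrap");
	printf("%s\n", buf + size - 3);		/* prints "wrap" */
	return 0;
}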
1077 ctx->pointer = ctx->buffer; in ar_context_run()
1079 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1); in ar_context_run()
1080 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN); in ar_context_run()
1087 branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS); in find_branch_descriptor()
1093 return d + z - 1; in find_branch_descriptor()
1104 desc = list_entry(ctx->buffer_list.next, in context_tasklet()
1106 last = ctx->last; in context_tasklet()
1107 while (last->branch_address != 0) { in context_tasklet()
1109 address = le32_to_cpu(last->branch_address); in context_tasklet()
1112 ctx->current_bus = address; in context_tasklet()
1116 if (address < desc->buffer_bus || in context_tasklet()
1117 address >= desc->buffer_bus + desc->used) in context_tasklet()
1118 desc = list_entry(desc->list.next, in context_tasklet()
1120 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d); in context_tasklet()
1123 if (!ctx->callback(ctx, d, last)) in context_tasklet()
1130 old_desc->used = 0; in context_tasklet()
1131 spin_lock_irqsave(&ctx->ohci->lock, flags); in context_tasklet()
1132 list_move_tail(&old_desc->list, &ctx->buffer_list); in context_tasklet()
1133 spin_unlock_irqrestore(&ctx->ohci->lock, flags); in context_tasklet()
1135 ctx->last = last; in context_tasklet()
1141 * context. Must be called with ohci->lock held.
1151 * program. This will catch run-away userspace or DoS attacks. in context_add_buffer()
1153 if (ctx->total_allocation >= 16*1024*1024) in context_add_buffer()
1154 return -ENOMEM; in context_add_buffer()
1156 desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC); in context_add_buffer()
1158 return -ENOMEM; in context_add_buffer()
1160 offset = (void *)&desc->buffer - (void *)desc; in context_add_buffer()
1162 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads in context_add_buffer()
1163 * for descriptors, even 0x10-byte ones. This can cause page faults when in context_add_buffer()
1167 desc->buffer_size = PAGE_SIZE - offset - 0x10; in context_add_buffer()
1168 desc->buffer_bus = bus_addr + offset; in context_add_buffer()
1169 desc->used = 0; in context_add_buffer()
1171 list_add_tail(&desc->list, &ctx->buffer_list); in context_add_buffer()
1172 ctx->total_allocation += PAGE_SIZE; in context_add_buffer()
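Note: context_add_buffer() caps a context's descriptor memory at 16 MB and deliberately leaves the last 0x10 bytes of every page unused (lines 1162-1167), so a JMicron-style 0x20-byte descriptor prefetch can never cross into the next, possibly unmapped, page. A sketch of the size computation; the struct layout is reconstructed, only the arithmetic comes from the listing.

#include <stddef.h>

#define PAGE_SIZE 4096

/* Reconstructed shape of the per-page descriptor buffer: a small header
 * followed by the descriptor storage. */
struct descriptor_buffer {
	void *list_next, *list_prev;	/* stands in for struct list_head */
	unsigned int used;
	unsigned int buffer_size;
	unsigned long buffer_bus;
	char buffer[];			/* descriptors live here */
};

static unsigned int usable_descriptor_bytes(void)
{
	size_t offset = offsetof(struct descriptor_buffer, buffer);

	/* header skipped, plus 0x10 bytes of slack for over-reading chips */
	return PAGE_SIZE - offset - 0x10;
}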
1180 ctx->ohci = ohci; in context_init()
1181 ctx->regs = regs; in context_init()
1182 ctx->total_allocation = 0; in context_init()
1184 INIT_LIST_HEAD(&ctx->buffer_list); in context_init()
1186 return -ENOMEM; in context_init()
1188 ctx->buffer_tail = list_entry(ctx->buffer_list.next, in context_init()
1191 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx); in context_init()
1192 ctx->callback = callback; in context_init()
1199 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer)); in context_init()
1200 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST); in context_init()
1201 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011); in context_init()
1202 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer); in context_init()
1203 ctx->last = ctx->buffer_tail->buffer; in context_init()
1204 ctx->prev = ctx->buffer_tail->buffer; in context_init()
1205 ctx->prev_z = 1; in context_init()
1212 struct fw_card *card = &ctx->ohci->card; in context_release()
1215 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) { in context_release()
1216 dmam_free_coherent(card->device, PAGE_SIZE, desc, in context_release()
1217 desc->buffer_bus - ((void *)&desc->buffer - (void *)desc)); in context_release()
1221 /* Must be called with ohci->lock held */
1226 struct descriptor_buffer *desc = ctx->buffer_tail; in context_get_descriptors()
1228 if (z * sizeof(*d) > desc->buffer_size) in context_get_descriptors()
1231 if (z * sizeof(*d) > desc->buffer_size - desc->used) { in context_get_descriptors()
1235 if (desc->list.next == &ctx->buffer_list) { in context_get_descriptors()
1241 desc = list_entry(desc->list.next, in context_get_descriptors()
1243 ctx->buffer_tail = desc; in context_get_descriptors()
1246 d = desc->buffer + desc->used / sizeof(*d); in context_get_descriptors()
1248 *d_bus = desc->buffer_bus + desc->used; in context_get_descriptors()
1255 struct fw_ohci *ohci = ctx->ohci; in context_run()
1257 reg_write(ohci, COMMAND_PTR(ctx->regs), in context_run()
1258 le32_to_cpu(ctx->last->branch_address)); in context_run()
1259 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0); in context_run()
1260 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra); in context_run()
1261 ctx->running = true; in context_run()
1269 struct descriptor_buffer *desc = ctx->buffer_tail; in context_append()
1272 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); in context_append()
1274 desc->used += (z + extra) * sizeof(*d); in context_append()
1278 d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z); in context_append()
1279 d_branch->branch_address = cpu_to_le32(d_bus | z); in context_append()
1284 * multi-descriptor block starting with an INPUT_MORE, put a copy of in context_append()
1290 if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) && in context_append()
1291 d_branch != ctx->prev && in context_append()
1292 (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) == in context_append()
1294 ctx->prev->branch_address = cpu_to_le32(d_bus | z); in context_append()
1297 ctx->prev = d; in context_append()
1298 ctx->prev_z = z; in context_append()
1303 struct fw_ohci *ohci = ctx->ohci; in context_stop()
1307 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); in context_stop()
1308 ctx->running = false; in context_stop()
1311 reg = reg_read(ohci, CONTROL_SET(ctx->regs)); in context_stop()
1328 * Must always be called with the ohci->lock held to ensure proper
1334 struct fw_ohci *ohci = ctx->ohci; in at_context_queue_packet()
1343 packet->ack = RCODE_SEND_ERROR; in at_context_queue_packet()
1344 return -1; in at_context_queue_packet()
1348 d[0].res_count = cpu_to_le16(packet->timestamp); in at_context_queue_packet()
1356 tcode = (packet->header[0] >> 4) & 0x0f; in at_context_queue_packet()
1368 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | in at_context_queue_packet()
1369 (packet->speed << 16)); in at_context_queue_packet()
1370 header[1] = cpu_to_le32((packet->header[1] & 0xffff) | in at_context_queue_packet()
1371 (packet->header[0] & 0xffff0000)); in at_context_queue_packet()
1372 header[2] = cpu_to_le32(packet->header[2]); in at_context_queue_packet()
1375 header[3] = cpu_to_le32(packet->header[3]); in at_context_queue_packet()
1377 header[3] = (__force __le32) packet->header[3]; in at_context_queue_packet()
1379 d[0].req_count = cpu_to_le16(packet->header_length); in at_context_queue_packet()
1384 (packet->speed << 16)); in at_context_queue_packet()
1385 header[1] = cpu_to_le32(packet->header[1]); in at_context_queue_packet()
1386 header[2] = cpu_to_le32(packet->header[2]); in at_context_queue_packet()
1389 if (is_ping_packet(&packet->header[1])) in at_context_queue_packet()
1394 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | in at_context_queue_packet()
1395 (packet->speed << 16)); in at_context_queue_packet()
1396 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); in at_context_queue_packet()
1402 packet->ack = RCODE_SEND_ERROR; in at_context_queue_packet()
1403 return -1; in at_context_queue_packet()
1408 driver_data->packet = packet; in at_context_queue_packet()
1409 packet->driver_data = driver_data; in at_context_queue_packet()
1411 if (packet->payload_length > 0) { in at_context_queue_packet()
1412 if (packet->payload_length > sizeof(driver_data->inline_data)) { in at_context_queue_packet()
1413 payload_bus = dma_map_single(ohci->card.device, in at_context_queue_packet()
1414 packet->payload, in at_context_queue_packet()
1415 packet->payload_length, in at_context_queue_packet()
1417 if (dma_mapping_error(ohci->card.device, payload_bus)) { in at_context_queue_packet()
1418 packet->ack = RCODE_SEND_ERROR; in at_context_queue_packet()
1419 return -1; in at_context_queue_packet()
1421 packet->payload_bus = payload_bus; in at_context_queue_packet()
1422 packet->payload_mapped = true; in at_context_queue_packet()
1424 memcpy(driver_data->inline_data, packet->payload, in at_context_queue_packet()
1425 packet->payload_length); in at_context_queue_packet()
1429 d[2].req_count = cpu_to_le16(packet->payload_length); in at_context_queue_packet()
1438 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | in at_context_queue_packet()
1443 if (ohci->generation != packet->generation) { in at_context_queue_packet()
1444 if (packet->payload_mapped) in at_context_queue_packet()
1445 dma_unmap_single(ohci->card.device, payload_bus, in at_context_queue_packet()
1446 packet->payload_length, DMA_TO_DEVICE); in at_context_queue_packet()
1447 packet->ack = RCODE_GENERATION; in at_context_queue_packet()
1448 return -1; in at_context_queue_packet()
1451 context_append(ctx, d, z, 4 - z); in at_context_queue_packet()
1453 if (ctx->running) in at_context_queue_packet()
1454 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); in at_context_queue_packet()
1463 tasklet_disable(&ctx->tasklet); in at_context_flush()
1465 ctx->flushing = true; in at_context_flush()
1467 ctx->flushing = false; in at_context_flush()
1469 tasklet_enable(&ctx->tasklet); in at_context_flush()
1478 struct fw_ohci *ohci = context->ohci; in handle_at_packet()
1481 if (last->transfer_status == 0 && !context->flushing) in handle_at_packet()
1486 packet = driver_data->packet; in handle_at_packet()
1491 if (packet->payload_mapped) in handle_at_packet()
1492 dma_unmap_single(ohci->card.device, packet->payload_bus, in handle_at_packet()
1493 packet->payload_length, DMA_TO_DEVICE); in handle_at_packet()
1495 evt = le16_to_cpu(last->transfer_status) & 0x1f; in handle_at_packet()
1496 packet->timestamp = le16_to_cpu(last->res_count); in handle_at_packet()
1498 log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt); in handle_at_packet()
1503 packet->ack = RCODE_CANCELLED; in handle_at_packet()
1511 packet->ack = RCODE_GENERATION; in handle_at_packet()
1515 if (context->flushing) in handle_at_packet()
1516 packet->ack = RCODE_GENERATION; in handle_at_packet()
1520 * node is not on the bus or not sending acks. in handle_at_packet()
1522 packet->ack = RCODE_NO_ACK; in handle_at_packet()
1533 packet->ack = evt - 0x10; in handle_at_packet()
1537 if (context->flushing) { in handle_at_packet()
1538 packet->ack = RCODE_GENERATION; in handle_at_packet()
1544 packet->ack = RCODE_SEND_ERROR; in handle_at_packet()
1548 packet->callback(packet, &ohci->card, packet->ack); in handle_at_packet()
1565 tcode = HEADER_GET_TCODE(packet->header[0]); in handle_local_rom()
1567 length = HEADER_GET_DATA_LENGTH(packet->header[3]); in handle_local_rom()
1571 i = csr - CSR_CONFIG_ROM; in handle_local_rom()
1573 fw_fill_response(&response, packet->header, in handle_local_rom()
1576 fw_fill_response(&response, packet->header, in handle_local_rom()
1579 fw_fill_response(&response, packet->header, RCODE_COMPLETE, in handle_local_rom()
1580 (void *) ohci->config_rom + i, length); in handle_local_rom()
1583 fw_core_handle_response(&ohci->card, &response); in handle_local_rom()
1590 int tcode, length, ext_tcode, sel, try; in handle_local_lock() local
1594 tcode = HEADER_GET_TCODE(packet->header[0]); in handle_local_lock()
1595 length = HEADER_GET_DATA_LENGTH(packet->header[3]); in handle_local_lock()
1596 payload = packet->payload; in handle_local_lock()
1597 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]); in handle_local_lock()
1607 fw_fill_response(&response, packet->header, in handle_local_lock()
1612 sel = (csr - CSR_BUS_MANAGER_ID) / 4; in handle_local_lock()
1615 reg_write(ohci, OHCI1394_CSRControl, sel); in handle_local_lock()
1621 fw_fill_response(&response, packet->header, in handle_local_lock()
1628 fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); in handle_local_lock()
1631 fw_core_handle_response(&ohci->card, &response); in handle_local_lock()
1638 if (ctx == &ctx->ohci->at_request_ctx) { in handle_local_request()
1639 packet->ack = ACK_PENDING; in handle_local_request()
1640 packet->callback(packet, &ctx->ohci->card, packet->ack); in handle_local_request()
1645 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) | in handle_local_request()
1646 packet->header[2]; in handle_local_request()
1647 csr = offset - CSR_REGISTER_BASE; in handle_local_request()
1651 handle_local_rom(ctx->ohci, packet, csr); in handle_local_request()
1657 handle_local_lock(ctx->ohci, packet, csr); in handle_local_request()
1660 if (ctx == &ctx->ohci->at_request_ctx) in handle_local_request()
1661 fw_core_handle_request(&ctx->ohci->card, packet); in handle_local_request()
1663 fw_core_handle_response(&ctx->ohci->card, packet); in handle_local_request()
1667 if (ctx == &ctx->ohci->at_response_ctx) { in handle_local_request()
1668 packet->ack = ACK_COMPLETE; in handle_local_request()
1669 packet->callback(packet, &ctx->ohci->card, packet->ack); in handle_local_request()
1680 spin_lock_irqsave(&ctx->ohci->lock, flags); in at_context_transmit()
1682 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id && in at_context_transmit()
1683 ctx->ohci->generation == packet->generation) { in at_context_transmit()
1684 spin_unlock_irqrestore(&ctx->ohci->lock, flags); in at_context_transmit()
1686 // Timestamping on behalf of the hardware. in at_context_transmit()
1687 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci)); in at_context_transmit()
1694 spin_unlock_irqrestore(&ctx->ohci->lock, flags); in at_context_transmit()
1697 // Timestamping on behalf of the hardware. in at_context_transmit()
1698 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci)); in at_context_transmit()
1700 packet->callback(packet, &ctx->ohci->card, packet->ack); in at_context_transmit()
1725 if (!(ohci->it_context_support & (1 << i))) in handle_dead_contexts()
1731 if (!(ohci->ir_context_support & (1 << i))) in handle_dead_contexts()
1753 * - When the lowest six bits are wrapping around to zero, a read that happens
1755 * - When the cycleOffset field wraps around to zero, the cycleCount field is
1757 * - Occasionally, the entire register reads zero.
1777 if (ohci->quirks & QUIRK_CYCLE_TIMER) { in get_cycle_time()
1788 diff01 = t1 - t0; in get_cycle_time()
1789 diff12 = t2 - t1; in get_cycle_time()
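Note: the bullet points at lines 1753-1757 are excerpts of the comment describing the QUIRK_CYCLE_TIMER hardware bugs; lines 1777-1789 are from get_cycle_time(), which works around them by reading the register repeatedly and only accepting a value once consecutive deltas look sane. A condensed model follows. cycle_timer_ticks() uses the OHCI register layout (cycleOffset in bits 0-11, cycleCount in bits 12-24, seconds above); the retry bound and the raw-read helper are ours.

#include <stdint.h>

/* Assumed raw MMIO read; stands in for
 * reg_read(ohci, OHCI1394_IsochronousCycleTimer). */
extern uint32_t read_cycle_timer_raw(void);

/* Flatten the packed register into a monotonically increasing tick count
 * so deltas can be compared (8000 cycles/s, 3072 ticks/cycle). */
static uint32_t cycle_timer_ticks(uint32_t c)
{
	uint32_t ticks = c & 0xfff;			/* cycleOffset */

	ticks += 3072 * ((c >> 12) & 0x1fff);		/* cycleCount */
	ticks += (3072 * 8000) * (c >> 25);		/* seconds */
	return ticks;
}

static uint32_t get_cycle_time_model(void)
{
	uint32_t c0, c1, c2;
	int32_t diff01, diff12;
	int tries = 0;

	c1 = read_cycle_timer_raw();
	c2 = read_cycle_timer_raw();
	do {
		c0 = c1;
		c1 = c2;
		c2 = read_cycle_timer_raw();
		diff01 = cycle_timer_ticks(c1) - cycle_timer_ticks(c0);
		diff12 = cycle_timer_ticks(c2) - cycle_timer_ticks(c1);
		/* retry on a backwards step or wildly uneven deltas; the
		 * short-circuit ordering keeps the divisions safe */
	} while ((diff01 <= 0 || diff12 <= 0 ||
		  diff01 / diff12 >= 2 || diff12 / diff01 >= 2) &&
		 tries++ < 20);

	return c2;
}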
1808 if (unlikely(!ohci->bus_time_running)) { in update_bus_time()
1810 ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) | in update_bus_time()
1812 ohci->bus_time_running = true; in update_bus_time()
1815 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) in update_bus_time()
1816 ohci->bus_time += 0x40; in update_bus_time()
1818 return ohci->bus_time | cycle_time_seconds; in update_bus_time()
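Note: update_bus_time() extends the hardware's 7-bit seconds field into a full 32-bit bus time: the driver keeps the upper bits in ohci->bus_time and bumps them by 0x40 whenever bit 6 of the hardware seconds toggles, which works as long as the function runs at least once per 64 seconds. A model under those assumptions; the seed from a wall clock mirrors the ktime_get_seconds() use at line 1810.

#include <stdbool.h>
#include <stdint.h>

struct bus_clock {
	uint32_t bus_time;	/* bits 6 and up; low 6 bits always zero */
	bool running;
};

static uint32_t update_bus_time_model(struct bus_clock *c,
				      uint32_t cycle_time_seconds,
				      uint32_t wall_seconds)
{
	cycle_time_seconds &= 0x7f;		/* hardware gives 7 bits */

	if (!c->running) {
		c->bus_time = (wall_seconds & ~0x7fu) |
			      (cycle_time_seconds & 0x40);
		c->running = true;
	}

	/* bit 6 toggled since last call: fold the carry into the upper bits */
	if ((c->bus_time & 0x40) != (cycle_time_seconds & 0x40))
		c->bus_time += 0x40;

	return c->bus_time | cycle_time_seconds;
}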
1825 mutex_lock(&ohci->phy_reg_mutex); in get_status_for_port()
1829 mutex_unlock(&ohci->phy_reg_mutex); in get_status_for_port()
1849 entry = ohci->self_id_buffer[i]; in get_self_id_pos()
1851 return -1; in get_self_id_pos()
1863 mutex_lock(&ohci->phy_reg_mutex); in initiated_reset()
1879 mutex_unlock(&ohci->phy_reg_mutex); in initiated_reset()
1898 return -EBUSY; in find_and_insert_self_id()
1902 reg = ohci_read_phy_reg(&ohci->card, 4); in find_and_insert_self_id()
1905 self_id |= ((reg & 0x07) << 8); /* power class */ in find_and_insert_self_id()
1907 reg = ohci_read_phy_reg(&ohci->card, 1); in find_and_insert_self_id()
1916 self_id |= ((status & 0x3) << (6 - (i * 2))); in find_and_insert_self_id()
1923 memmove(&(ohci->self_id_buffer[pos+1]), in find_and_insert_self_id()
1924 &(ohci->self_id_buffer[pos]), in find_and_insert_self_id()
1925 (self_id_count - pos) * sizeof(*ohci->self_id_buffer)); in find_and_insert_self_id()
1926 ohci->self_id_buffer[pos] = self_id; in find_and_insert_self_id()
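Note: find_and_insert_self_id() builds a self-ID quadlet for the local PHY, packing the power class into bits 8-10 (line 1905) and two status bits per port downward from bit 6 (line 1916), then splices it into the sorted buffer by opening a gap with memmove(). A compact model; the three-port loop bound is illustrative, and get_port_status() is a hypothetical stand-in for get_status_for_port() above.

#include <stdint.h>
#include <string.h>

extern int get_port_status(int port_index);	/* assumed PHY query */

static void insert_local_self_id(uint32_t *self_id_buffer, int self_id_count,
				 int pos, uint32_t self_id, int power_class)
{
	self_id |= (uint32_t)(power_class & 0x07) << 8;	/* bits 8..10 */

	for (int i = 0; i < 3; i++)			/* two bits per port */
		self_id |= (uint32_t)(get_port_status(i) & 0x3) << (6 - i * 2);

	/* open a gap at the sorted position, then drop the quadlet in */
	memmove(&self_id_buffer[pos + 1], &self_id_buffer[pos],
		(self_id_count - pos) * sizeof(*self_id_buffer));
	self_id_buffer[pos] = self_id;
}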
1952 ohci->node_id = reg & (OHCI1394_NodeID_busNumber | in bus_reset_work()
1956 if (!(ohci->is_root && is_new_root)) in bus_reset_work()
1959 ohci->is_root = is_new_root; in bus_reset_work()
1979 generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff; in bus_reset_work()
1983 u32 id = cond_le32_to_cpu(ohci->self_id[i]); in bus_reset_work()
1984 u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]); in bus_reset_work()
2004 ohci->self_id_buffer[j] = id; in bus_reset_work()
2007 if (ohci->quirks & QUIRK_TI_SLLZ059) { in bus_reset_work()
2043 spin_lock_irq(&ohci->lock); in bus_reset_work()
2045 ohci->generation = -1; /* prevent AT packet queueing */ in bus_reset_work()
2046 context_stop(&ohci->at_request_ctx); in bus_reset_work()
2047 context_stop(&ohci->at_response_ctx); in bus_reset_work()
2049 spin_unlock_irq(&ohci->lock); in bus_reset_work()
2056 at_context_flush(&ohci->at_request_ctx); in bus_reset_work()
2057 at_context_flush(&ohci->at_response_ctx); in bus_reset_work()
2059 spin_lock_irq(&ohci->lock); in bus_reset_work()
2061 ohci->generation = generation; in bus_reset_work()
2064 if (ohci->quirks & QUIRK_RESET_PACKET) in bus_reset_work()
2065 ohci->request_generation = generation; in bus_reset_work()
2076 if (ohci->next_config_rom != NULL) { in bus_reset_work()
2077 if (ohci->next_config_rom != ohci->config_rom) { in bus_reset_work()
2078 free_rom = ohci->config_rom; in bus_reset_work()
2079 free_rom_bus = ohci->config_rom_bus; in bus_reset_work()
2081 ohci->config_rom = ohci->next_config_rom; in bus_reset_work()
2082 ohci->config_rom_bus = ohci->next_config_rom_bus; in bus_reset_work()
2083 ohci->next_config_rom = NULL; in bus_reset_work()
2092 be32_to_cpu(ohci->config_rom[2])); in bus_reset_work()
2093 ohci->config_rom[0] = ohci->next_header; in bus_reset_work()
2095 be32_to_cpu(ohci->next_header)); in bus_reset_work()
2103 spin_unlock_irq(&ohci->lock); in bus_reset_work()
2106 dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus); in bus_reset_work()
2110 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, in bus_reset_work()
2111 self_id_count, ohci->self_id_buffer, in bus_reset_work()
2112 ohci->csr_state_setclear_abdicate); in bus_reset_work()
2113 ohci->csr_state_setclear_abdicate = false; in bus_reset_work()
2136 queue_work(selfid_workqueue, &ohci->bus_reset_work); in irq_handler()
2139 tasklet_schedule(&ohci->ar_request_ctx.tasklet); in irq_handler()
2142 tasklet_schedule(&ohci->ar_response_ctx.tasklet); in irq_handler()
2145 tasklet_schedule(&ohci->at_request_ctx.tasklet); in irq_handler()
2148 tasklet_schedule(&ohci->at_response_ctx.tasklet); in irq_handler()
2155 i = ffs(iso_event) - 1; in irq_handler()
2157 &ohci->ir_context_list[i].context.tasklet); in irq_handler()
2167 i = ffs(iso_event) - 1; in irq_handler()
2169 &ohci->it_context_list[i].context.tasklet); in irq_handler()
2208 spin_lock(&ohci->lock); in irq_handler()
2210 spin_unlock(&ohci->lock); in irq_handler()
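Note: the two isochronous dispatch loops in irq_handler() walk a pending-events word one set bit at a time: ffs() finds the lowest pending context, its tasklet is scheduled, and the bit is cleared (lines 2155 and 2167). A stand-alone demonstration of the pattern:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int iso_event = 0x8000022;	/* example: contexts 1, 5, 27 */

	while (iso_event) {
		int i = ffs(iso_event) - 1;	/* lowest pending context */

		printf("schedule tasklet for context %d\n", i);
		iso_event &= ~(1u << i);	/* mark it handled */
	}
	return 0;
}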
2226 return -ENODEV; /* Card was ejected. */ in software_reset()
2234 return -EBUSY; in software_reset()
2243 memset(&dest[length], 0, CONFIG_ROM_SIZE - size); in copy_config_rom()
2269 if (ohci->quirks & QUIRK_NO_1394A) in configure_1394a_enhancements()
2309 for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) { in probe_tsb41ba3d()
2334 * most of the registers. In fact, on some cards (ALI M5251), in ohci_enable()
2357 ohci_err(ohci, "failed to set Link Power Status\n"); in ohci_enable()
2358 return -EIO; in ohci_enable()
2361 if (ohci->quirks & QUIRK_TI_SLLZ059) { in ohci_enable()
2368 ohci->quirks &= ~QUIRK_TI_SLLZ059; in ohci_enable()
2374 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); in ohci_enable()
2385 ohci->bus_time_running = false; in ohci_enable()
2388 if (ohci->ir_context_support & (1 << i)) in ohci_enable()
2396 card->broadcast_channel_auto_allocated = true; in ohci_enable()
2401 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; in ohci_enable()
2403 card->priority_budget_implemented = ohci->pri_req_max != 0; in ohci_enable()
2424 * link, so we have a valid config rom before enabling - the in ohci_enable()
2430 * the ConfigRomHeader and BusOptions registers on bus reset. in ohci_enable()
2438 ohci->next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, in ohci_enable()
2439 &ohci->next_config_rom_bus, GFP_KERNEL); in ohci_enable()
2440 if (ohci->next_config_rom == NULL) in ohci_enable()
2441 return -ENOMEM; in ohci_enable()
2443 copy_config_rom(ohci->next_config_rom, config_rom, length); in ohci_enable()
2449 ohci->next_config_rom = ohci->config_rom; in ohci_enable()
2450 ohci->next_config_rom_bus = ohci->config_rom_bus; in ohci_enable()
2453 ohci->next_header = ohci->next_config_rom[0]; in ohci_enable()
2454 ohci->next_config_rom[0] = 0; in ohci_enable()
2457 be32_to_cpu(ohci->next_config_rom[2])); in ohci_enable()
2458 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); in ohci_enable()
2484 ar_context_run(&ohci->ar_request_ctx); in ohci_enable()
2485 ar_context_run(&ohci->ar_response_ctx); in ohci_enable()
2490 fw_schedule_bus_reset(&ohci->card, false, true); in ohci_enable()
2521 * during the atomic update, even on little endian in ohci_set_config_rom()
2527 * We use ohci->lock to avoid racing with the code that sets in ohci_set_config_rom()
2528 * ohci->next_config_rom to NULL (see bus_reset_work). in ohci_set_config_rom()
2531 next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, in ohci_set_config_rom()
2534 return -ENOMEM; in ohci_set_config_rom()
2536 spin_lock_irq(&ohci->lock); in ohci_set_config_rom()
2540 * push our new allocation into the ohci->next_config_rom in ohci_set_config_rom()
2549 if (ohci->next_config_rom == NULL) { in ohci_set_config_rom()
2550 ohci->next_config_rom = next_config_rom; in ohci_set_config_rom()
2551 ohci->next_config_rom_bus = next_config_rom_bus; in ohci_set_config_rom()
2555 copy_config_rom(ohci->next_config_rom, config_rom, length); in ohci_set_config_rom()
2557 ohci->next_header = config_rom[0]; in ohci_set_config_rom()
2558 ohci->next_config_rom[0] = 0; in ohci_set_config_rom()
2560 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); in ohci_set_config_rom()
2562 spin_unlock_irq(&ohci->lock); in ohci_set_config_rom()
2566 dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom, in ohci_set_config_rom()
2578 fw_schedule_bus_reset(&ohci->card, true, true); in ohci_set_config_rom()
2587 at_context_transmit(&ohci->at_request_ctx, packet); in ohci_send_request()
2594 at_context_transmit(&ohci->at_response_ctx, packet); in ohci_send_response()
2600 struct context *ctx = &ohci->at_request_ctx; in ohci_cancel_packet()
2601 struct driver_data *driver_data = packet->driver_data; in ohci_cancel_packet()
2602 int ret = -ENOENT; in ohci_cancel_packet()
2604 tasklet_disable_in_atomic(&ctx->tasklet); in ohci_cancel_packet()
2606 if (packet->ack != 0) in ohci_cancel_packet()
2609 if (packet->payload_mapped) in ohci_cancel_packet()
2610 dma_unmap_single(ohci->card.device, packet->payload_bus, in ohci_cancel_packet()
2611 packet->payload_length, DMA_TO_DEVICE); in ohci_cancel_packet()
2613 log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20); in ohci_cancel_packet()
2614 driver_data->packet = NULL; in ohci_cancel_packet()
2615 packet->ack = RCODE_CANCELLED; in ohci_cancel_packet()
2617 // Timestamping on behalf of the hardware. in ohci_cancel_packet()
2618 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci)); in ohci_cancel_packet()
2620 packet->callback(packet, &ohci->card, packet->ack); in ohci_cancel_packet()
2623 tasklet_enable(&ctx->tasklet); in ohci_cancel_packet()
2640 * interrupt bit. Clear physReqResourceAllBuses on bus reset. in ohci_enable_phys_dma()
2643 spin_lock_irqsave(&ohci->lock, flags); in ohci_enable_phys_dma()
2645 if (ohci->generation != generation) { in ohci_enable_phys_dma()
2646 ret = -ESTALE; in ohci_enable_phys_dma()
2651 * Note, if the node ID contains a non-local bus ID, physical DMA is in ohci_enable_phys_dma()
2652 * enabled for _all_ nodes on remote buses. in ohci_enable_phys_dma()
2659 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); in ohci_enable_phys_dma()
2663 spin_unlock_irqrestore(&ohci->lock, flags); in ohci_enable_phys_dma()
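Note: ohci_enable_phys_dma() keeps one enable bit per node, split across two 32-bit filter registers: node IDs below 32 go to the Lo register, the rest to the Hi register at bit n - 32 (line 2659). A sketch of the split, with the two words standing in for the FilterLo/FilterHi set registers:

#include <stdint.h>

struct phys_filter {
	uint32_t lo;	/* nodes 0..31 */
	uint32_t hi;	/* nodes 32..62 */
};

static void enable_node(struct phys_filter *f, int n)
{
	if (n < 32)
		f->lo |= 1u << n;
	else
		f->hi |= 1u << (n - 32);
}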
2677 if (ohci->is_root && in ohci_read_csr()
2683 if (ohci->csr_state_setclear_abdicate) in ohci_read_csr()
2700 spin_lock_irqsave(&ohci->lock, flags); in ohci_read_csr()
2702 spin_unlock_irqrestore(&ohci->lock, flags); in ohci_read_csr()
2711 (ohci->pri_req_max << 8); in ohci_read_csr()
2726 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { in ohci_write_csr()
2732 ohci->csr_state_setclear_abdicate = false; in ohci_write_csr()
2736 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { in ohci_write_csr()
2742 ohci->csr_state_setclear_abdicate = true; in ohci_write_csr()
2758 spin_lock_irqsave(&ohci->lock, flags); in ohci_write_csr()
2759 ohci->bus_time = (update_bus_time(ohci) & 0x40) | in ohci_write_csr()
2761 spin_unlock_irqrestore(&ohci->lock, flags); in ohci_write_csr()
2784 ctx->base.callback.sc(&ctx->base, ctx->last_timestamp, in flush_iso_completions()
2785 ctx->header_length, ctx->header, in flush_iso_completions()
2786 ctx->base.callback_data); in flush_iso_completions()
2787 ctx->header_length = 0; in flush_iso_completions()
2794 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) { in copy_iso_headers()
2795 if (ctx->base.drop_overflow_headers) in copy_iso_headers()
2800 ctx_hdr = ctx->header + ctx->header_length; in copy_iso_headers()
2801 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]); in copy_iso_headers()
2808 if (ctx->base.header_size > 0) in copy_iso_headers()
2810 if (ctx->base.header_size > 4) in copy_iso_headers()
2812 if (ctx->base.header_size > 8) in copy_iso_headers()
2813 memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8); in copy_iso_headers()
2814 ctx->header_length += ctx->base.header_size; in copy_iso_headers()
2827 if (pd->transfer_status) in handle_ir_packet_per_buffer()
2833 while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) { in handle_ir_packet_per_buffer()
2835 buffer_dma = le32_to_cpu(d->data_address); in handle_ir_packet_per_buffer()
2836 dma_sync_single_range_for_cpu(context->ohci->card.device, in handle_ir_packet_per_buffer()
2839 le16_to_cpu(d->req_count), in handle_ir_packet_per_buffer()
2845 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) in handle_ir_packet_per_buffer()
2861 req_count = le16_to_cpu(last->req_count); in handle_ir_buffer_fill()
2862 res_count = le16_to_cpu(READ_ONCE(last->res_count)); in handle_ir_buffer_fill()
2863 completed = req_count - res_count; in handle_ir_buffer_fill()
2864 buffer_dma = le32_to_cpu(last->data_address); in handle_ir_buffer_fill()
2867 ctx->mc_buffer_bus = buffer_dma; in handle_ir_buffer_fill()
2868 ctx->mc_completed = completed; in handle_ir_buffer_fill()
2875 dma_sync_single_range_for_cpu(context->ohci->card.device, in handle_ir_buffer_fill()
2880 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) { in handle_ir_buffer_fill()
2881 ctx->base.callback.mc(&ctx->base, in handle_ir_buffer_fill()
2883 ctx->base.callback_data); in handle_ir_buffer_fill()
2884 ctx->mc_completed = 0; in handle_ir_buffer_fill()
2892 dma_sync_single_range_for_cpu(ctx->context.ohci->card.device, in flush_ir_buffer_fill()
2893 ctx->mc_buffer_bus & PAGE_MASK, in flush_ir_buffer_fill()
2894 ctx->mc_buffer_bus & ~PAGE_MASK, in flush_ir_buffer_fill()
2895 ctx->mc_completed, DMA_FROM_DEVICE); in flush_ir_buffer_fill()
2897 ctx->base.callback.mc(&ctx->base, in flush_ir_buffer_fill()
2898 ctx->mc_buffer_bus + ctx->mc_completed, in flush_ir_buffer_fill()
2899 ctx->base.callback_data); in flush_ir_buffer_fill()
2900 ctx->mc_completed = 0; in flush_ir_buffer_fill()
2910 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) in sync_it_packet_for_cpu()
2921 if ((le32_to_cpu(pd->data_address) & PAGE_MASK) == in sync_it_packet_for_cpu()
2922 (context->current_bus & PAGE_MASK)) { in sync_it_packet_for_cpu()
2923 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) in sync_it_packet_for_cpu()
2929 buffer_dma = le32_to_cpu(pd->data_address); in sync_it_packet_for_cpu()
2930 dma_sync_single_range_for_cpu(context->ohci->card.device, in sync_it_packet_for_cpu()
2933 le16_to_cpu(pd->req_count), in sync_it_packet_for_cpu()
2935 control = pd->control; in sync_it_packet_for_cpu()
2950 if (pd->transfer_status) in handle_it_packet()
2958 if (ctx->header_length + 4 > PAGE_SIZE) { in handle_it_packet()
2959 if (ctx->base.drop_overflow_headers) in handle_it_packet()
2964 ctx_hdr = ctx->header + ctx->header_length; in handle_it_packet()
2965 ctx->last_timestamp = le16_to_cpu(last->res_count); in handle_it_packet()
2966 /* Present this value as big-endian to match the receive code */ in handle_it_packet()
2967 *ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) | in handle_it_packet()
2968 le16_to_cpu(pd->res_count)); in handle_it_packet()
2969 ctx->header_length += 4; in handle_it_packet()
2971 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) in handle_it_packet()
2985 ohci->mc_channels = channels; in set_multichannel_mask()
2996 int index, ret = -EBUSY; in ohci_allocate_iso_context()
2998 spin_lock_irq(&ohci->lock); in ohci_allocate_iso_context()
3002 mask = &ohci->it_context_mask; in ohci_allocate_iso_context()
3004 index = ffs(*mask) - 1; in ohci_allocate_iso_context()
3008 ctx = &ohci->it_context_list[index]; in ohci_allocate_iso_context()
3013 channels = &ohci->ir_context_channels; in ohci_allocate_iso_context()
3014 mask = &ohci->ir_context_mask; in ohci_allocate_iso_context()
3016 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; in ohci_allocate_iso_context()
3021 ctx = &ohci->ir_context_list[index]; in ohci_allocate_iso_context()
3026 mask = &ohci->ir_context_mask; in ohci_allocate_iso_context()
3028 index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1; in ohci_allocate_iso_context()
3030 ohci->mc_allocated = true; in ohci_allocate_iso_context()
3033 ctx = &ohci->ir_context_list[index]; in ohci_allocate_iso_context()
3038 index = -1; in ohci_allocate_iso_context()
3039 ret = -ENOSYS; in ohci_allocate_iso_context()
3042 spin_unlock_irq(&ohci->lock); in ohci_allocate_iso_context()
3048 ctx->header_length = 0; in ohci_allocate_iso_context()
3049 ctx->header = (void *) __get_free_page(GFP_KERNEL); in ohci_allocate_iso_context()
3050 if (ctx->header == NULL) { in ohci_allocate_iso_context()
3051 ret = -ENOMEM; in ohci_allocate_iso_context()
3054 ret = context_init(&ctx->context, ohci, regs, callback); in ohci_allocate_iso_context()
3060 ctx->mc_completed = 0; in ohci_allocate_iso_context()
3063 return &ctx->base; in ohci_allocate_iso_context()
3066 free_page((unsigned long)ctx->header); in ohci_allocate_iso_context()
3068 spin_lock_irq(&ohci->lock); in ohci_allocate_iso_context()
3076 ohci->mc_allocated = false; in ohci_allocate_iso_context()
3081 spin_unlock_irq(&ohci->lock); in ohci_allocate_iso_context()
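Note: all three context types in ohci_allocate_iso_context() allocate an index the same way: each free context is a set bit in a mask, ffs() picks the lowest one (lines 3004, 3016, 3028), and ohci_free_iso_context() later restores the bit (lines 3177-3188). A sketch of the claim/release arithmetic; locking is omitted, the driver holds ohci->lock around both:

#include <strings.h>	/* ffs() */

static int alloc_context_index(unsigned int *mask)
{
	int index = ffs(*mask) - 1;	/* -1 when no context is free */

	if (index >= 0)
		*mask &= ~(1u << index);	/* claim it */
	return index;
}

static void free_context_index(unsigned int *mask, int index)
{
	*mask |= 1u << index;			/* give it back */
}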
3090 struct fw_ohci *ohci = ctx->context.ohci; in ohci_start_iso()
3095 if (ctx->context.last->branch_address == 0) in ohci_start_iso()
3096 return -ENODATA; in ohci_start_iso()
3098 switch (ctx->base.type) { in ohci_start_iso()
3100 index = ctx - ohci->it_context_list; in ohci_start_iso()
3108 context_run(&ctx->context, match); in ohci_start_iso()
3115 index = ctx - ohci->ir_context_list; in ohci_start_iso()
3116 match = (tags << 28) | (sync << 8) | ctx->base.channel; in ohci_start_iso()
3124 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); in ohci_start_iso()
3125 context_run(&ctx->context, control); in ohci_start_iso()
3127 ctx->sync = sync; in ohci_start_iso()
3128 ctx->tags = tags; in ohci_start_iso()
3138 struct fw_ohci *ohci = fw_ohci(base->card); in ohci_stop_iso()
3142 switch (ctx->base.type) { in ohci_stop_iso()
3144 index = ctx - ohci->it_context_list; in ohci_stop_iso()
3150 index = ctx - ohci->ir_context_list; in ohci_stop_iso()
3155 context_stop(&ctx->context); in ohci_stop_iso()
3156 tasklet_kill(&ctx->context.tasklet); in ohci_stop_iso()
3163 struct fw_ohci *ohci = fw_ohci(base->card); in ohci_free_iso_context()
3169 context_release(&ctx->context); in ohci_free_iso_context()
3170 free_page((unsigned long)ctx->header); in ohci_free_iso_context()
3172 spin_lock_irqsave(&ohci->lock, flags); in ohci_free_iso_context()
3174 switch (base->type) { in ohci_free_iso_context()
3176 index = ctx - ohci->it_context_list; in ohci_free_iso_context()
3177 ohci->it_context_mask |= 1 << index; in ohci_free_iso_context()
3181 index = ctx - ohci->ir_context_list; in ohci_free_iso_context()
3182 ohci->ir_context_mask |= 1 << index; in ohci_free_iso_context()
3183 ohci->ir_context_channels |= 1ULL << base->channel; in ohci_free_iso_context()
3187 index = ctx - ohci->ir_context_list; in ohci_free_iso_context()
3188 ohci->ir_context_mask |= 1 << index; in ohci_free_iso_context()
3189 ohci->ir_context_channels |= ohci->mc_channels; in ohci_free_iso_context()
3190 ohci->mc_channels = 0; in ohci_free_iso_context()
3191 ohci->mc_allocated = false; in ohci_free_iso_context()
3195 spin_unlock_irqrestore(&ohci->lock, flags); in ohci_free_iso_context()
3200 struct fw_ohci *ohci = fw_ohci(base->card); in ohci_set_iso_channels()
3204 switch (base->type) { in ohci_set_iso_channels()
3207 spin_lock_irqsave(&ohci->lock, flags); in ohci_set_iso_channels()
3210 if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { in ohci_set_iso_channels()
3211 *channels = ohci->ir_context_channels; in ohci_set_iso_channels()
3212 ret = -EBUSY; in ohci_set_iso_channels()
3218 spin_unlock_irqrestore(&ohci->lock, flags); in ohci_set_iso_channels()
3222 ret = -EINVAL; in ohci_set_iso_channels()
3234 for (i = 0 ; i < ohci->n_ir ; i++) { in ohci_resume_iso_dma()
3235 ctx = &ohci->ir_context_list[i]; in ohci_resume_iso_dma()
3236 if (ctx->context.running) in ohci_resume_iso_dma()
3237 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); in ohci_resume_iso_dma()
3240 for (i = 0 ; i < ohci->n_it ; i++) { in ohci_resume_iso_dma()
3241 ctx = &ohci->it_context_list[i]; in ohci_resume_iso_dma()
3242 if (ctx->context.running) in ohci_resume_iso_dma()
3243 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); in ohci_resume_iso_dma()
3264 if (p->skip) in queue_iso_transmit()
3268 if (p->header_length > 0) in queue_iso_transmit()
3272 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT; in queue_iso_transmit()
3273 if (p->payload_length > 0) in queue_iso_transmit()
3274 payload_z = end_page - (payload_index >> PAGE_SHIFT); in queue_iso_transmit()
3281 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d)); in queue_iso_transmit()
3283 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus); in queue_iso_transmit()
3285 return -ENOMEM; in queue_iso_transmit()
3287 if (!p->skip) { in queue_iso_transmit()
3295 * FIXME: Make the context's cycle-lost behaviour configurable? in queue_iso_transmit()
3300 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | in queue_iso_transmit()
3301 IT_HEADER_TAG(p->tag) | in queue_iso_transmit()
3303 IT_HEADER_CHANNEL(ctx->base.channel) | in queue_iso_transmit()
3304 IT_HEADER_SPEED(ctx->base.speed)); in queue_iso_transmit()
3306 cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length + in queue_iso_transmit()
3307 p->payload_length)); in queue_iso_transmit()
3310 if (p->header_length > 0) { in queue_iso_transmit()
3311 d[2].req_count = cpu_to_le16(p->header_length); in queue_iso_transmit()
3313 memcpy(&d[z], p->header, p->header_length); in queue_iso_transmit()
3316 pd = d + z - payload_z; in queue_iso_transmit()
3317 payload_end_index = payload_index + p->payload_length; in queue_iso_transmit()
3323 min(next_page_index, payload_end_index) - payload_index; in queue_iso_transmit()
3326 page_bus = page_private(buffer->pages[page]); in queue_iso_transmit()
3329 dma_sync_single_range_for_device(ctx->context.ohci->card.device, in queue_iso_transmit()
3336 if (p->interrupt) in queue_iso_transmit()
3341 last = z == 2 ? d : d + z - 1; in queue_iso_transmit()
3342 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | in queue_iso_transmit()
3347 context_append(&ctx->context, d, z, header_z); in queue_iso_transmit()
3357 struct device *device = ctx->context.ohci->card.device; in queue_iso_packet_per_buffer()
3368 packet_count = packet->header_length / ctx->base.header_size; in queue_iso_packet_per_buffer()
3369 header_size = max(ctx->base.header_size, (size_t)8); in queue_iso_packet_per_buffer()
3375 payload_per_buffer = packet->payload_length / packet_count; in queue_iso_packet_per_buffer()
3380 d = context_get_descriptors(&ctx->context, in queue_iso_packet_per_buffer()
3383 return -ENOMEM; in queue_iso_packet_per_buffer()
3385 d->control = cpu_to_le16(DESCRIPTOR_STATUS | in queue_iso_packet_per_buffer()
3387 if (packet->skip && i == 0) in queue_iso_packet_per_buffer()
3388 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); in queue_iso_packet_per_buffer()
3389 d->req_count = cpu_to_le16(header_size); in queue_iso_packet_per_buffer()
3390 d->res_count = d->req_count; in queue_iso_packet_per_buffer()
3391 d->transfer_status = 0; in queue_iso_packet_per_buffer()
3392 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d))); in queue_iso_packet_per_buffer()
3398 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | in queue_iso_packet_per_buffer()
3404 length = PAGE_SIZE - offset; in queue_iso_packet_per_buffer()
3405 pd->req_count = cpu_to_le16(length); in queue_iso_packet_per_buffer()
3406 pd->res_count = pd->req_count; in queue_iso_packet_per_buffer()
3407 pd->transfer_status = 0; in queue_iso_packet_per_buffer()
3409 page_bus = page_private(buffer->pages[page]); in queue_iso_packet_per_buffer()
3410 pd->data_address = cpu_to_le32(page_bus + offset); in queue_iso_packet_per_buffer()
3417 rest -= length; in queue_iso_packet_per_buffer()
3421 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | in queue_iso_packet_per_buffer()
3424 if (packet->interrupt && i == packet_count - 1) in queue_iso_packet_per_buffer()
3425 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); in queue_iso_packet_per_buffer()
3427 context_append(&ctx->context, d, z, header_z); in queue_iso_packet_per_buffer()
3444 rest = packet->payload_length; in queue_iso_buffer_fill()
3449 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count)) in queue_iso_buffer_fill()
3450 return -EFAULT; in queue_iso_buffer_fill()
3453 d = context_get_descriptors(&ctx->context, 1, &d_bus); in queue_iso_buffer_fill()
3455 return -ENOMEM; in queue_iso_buffer_fill()
3457 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | in queue_iso_buffer_fill()
3459 if (packet->skip && i == 0) in queue_iso_buffer_fill()
3460 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); in queue_iso_buffer_fill()
3461 if (packet->interrupt && i == z - 1) in queue_iso_buffer_fill()
3462 d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); in queue_iso_buffer_fill()
3467 length = PAGE_SIZE - offset; in queue_iso_buffer_fill()
3468 d->req_count = cpu_to_le16(length); in queue_iso_buffer_fill()
3469 d->res_count = d->req_count; in queue_iso_buffer_fill()
3470 d->transfer_status = 0; in queue_iso_buffer_fill()
3472 page_bus = page_private(buffer->pages[page]); in queue_iso_buffer_fill()
3473 d->data_address = cpu_to_le32(page_bus + offset); in queue_iso_buffer_fill()
3475 dma_sync_single_range_for_device(ctx->context.ohci->card.device, in queue_iso_buffer_fill()
3479 rest -= length; in queue_iso_buffer_fill()
3483 context_append(&ctx->context, d, 1, 0); in queue_iso_buffer_fill()
3496 int ret = -ENOSYS; in ohci_queue_iso()
3498 spin_lock_irqsave(&ctx->context.ohci->lock, flags); in ohci_queue_iso()
3499 switch (base->type) { in ohci_queue_iso()
3510 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); in ohci_queue_iso()
3518 &container_of(base, struct iso_context, base)->context; in ohci_flush_queue_iso()
3520 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); in ohci_flush_queue_iso()
3528 tasklet_disable_in_atomic(&ctx->context.tasklet); in ohci_flush_iso_completions()
3530 if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) { in ohci_flush_iso_completions()
3531 context_tasklet((unsigned long)&ctx->context); in ohci_flush_iso_completions()
3533 switch (base->type) { in ohci_flush_iso_completions()
3536 if (ctx->header_length != 0) in ohci_flush_iso_completions()
3540 if (ctx->mc_completed != 0) in ohci_flush_iso_completions()
3544 ret = -ENOSYS; in ohci_flush_iso_completions()
3547 clear_bit_unlock(0, &ctx->flushing_completions); in ohci_flush_iso_completions()
3551 tasklet_enable(&ctx->context.tasklet); in ohci_flush_iso_completions()
3614 ar_context_release(&ohci->ar_response_ctx); in release_ohci()
3615 ar_context_release(&ohci->ar_request_ctx); in release_ohci()
3617 dev_notice(dev, "removed fw-ohci device\n"); in release_ohci()
3629 if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) { in pci_probe()
3630 dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n"); in pci_probe()
3631 return -ENOSYS; in pci_probe()
3636 return -ENOMEM; in pci_probe()
3637 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); in pci_probe()
3640 devres_add(&dev->dev, ohci); in pci_probe()
3644 dev_err(&dev->dev, "failed to enable OHCI hardware\n"); in pci_probe()
3651 spin_lock_init(&ohci->lock); in pci_probe()
3652 mutex_init(&ohci->phy_reg_mutex); in pci_probe()
3654 INIT_WORK(&ohci->bus_reset_work, bus_reset_work); in pci_probe()
3659 return -ENXIO; in pci_probe()
3665 return -ENXIO; in pci_probe()
3667 ohci->registers = pcim_iomap_table(dev)[0]; in pci_probe()
3670 if ((ohci_quirks[i].vendor == dev->vendor) && in pci_probe()
3672 ohci_quirks[i].device == dev->device) && in pci_probe()
3674 ohci_quirks[i].revision >= dev->revision)) { in pci_probe()
3675 ohci->quirks = ohci_quirks[i].flags; in pci_probe()
3679 ohci->quirks = param_quirks; in pci_probe()
3682 ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ; in pci_probe()
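Note: the elided halves of the quirk-table condition (lines 3670-3674) are wildcard checks: an entry matches on exact vendor, on exact device unless the entry is a wildcard, and on any revision up to and including the entry's, again unless wildcarded. A sketch of the predicate, with ANY_ID standing in for (unsigned short)PCI_ANY_ID:

#include <stdint.h>

#define ANY_ID 0xffff	/* stands in for (unsigned short)PCI_ANY_ID */

struct quirk_entry {
	uint16_t vendor;
	uint16_t device;	/* ANY_ID matches every device */
	uint16_t revision;	/* ANY_ID, or the highest affected revision */
	unsigned int flags;
};

static int quirk_matches(const struct quirk_entry *q,
			 uint16_t vendor, uint16_t device, uint16_t revision)
{
	return q->vendor == vendor &&
	       (q->device == ANY_ID || q->device == device) &&
	       (q->revision == ANY_ID || q->revision >= revision);
}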
3691 ohci->misc_buffer = dmam_alloc_coherent(&dev->dev, PAGE_SIZE, &ohci->misc_buffer_bus, in pci_probe()
3693 if (!ohci->misc_buffer) in pci_probe()
3694 return -ENOMEM; in pci_probe()
3696 err = ar_context_init(&ohci->ar_request_ctx, ohci, 0, in pci_probe()
3701 err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4, in pci_probe()
3706 err = context_init(&ohci->at_request_ctx, ohci, in pci_probe()
3711 err = context_init(&ohci->at_response_ctx, ohci, in pci_probe()
3717 ohci->ir_context_channels = ~0ULL; in pci_probe()
3718 ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); in pci_probe()
3720 ohci->ir_context_mask = ohci->ir_context_support; in pci_probe()
3721 ohci->n_ir = hweight32(ohci->ir_context_mask); in pci_probe()
3722 size = sizeof(struct iso_context) * ohci->n_ir; in pci_probe()
3723 ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL); in pci_probe()
3724 if (!ohci->ir_context_list) in pci_probe()
3725 return -ENOMEM; in pci_probe()
3728 ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); in pci_probe()
3729 /* JMicron JMB38x often shows 0 at first read, just ignore it */ in pci_probe()
3730 if (!ohci->it_context_support) { in pci_probe()
3732 ohci->it_context_support = 0xf; in pci_probe()
3735 ohci->it_context_mask = ohci->it_context_support; in pci_probe()
3736 ohci->n_it = hweight32(ohci->it_context_mask); in pci_probe()
3737 size = sizeof(struct iso_context) * ohci->n_it; in pci_probe()
3738 ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL); in pci_probe()
3739 if (!ohci->it_context_list) in pci_probe()
3740 return -ENOMEM; in pci_probe()
3742 ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2; in pci_probe()
3743 ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2; in pci_probe()
3751 if (!(ohci->quirks & QUIRK_NO_MSI)) in pci_probe()
3753 err = devm_request_irq(&dev->dev, dev->irq, irq_handler, in pci_probe()
3756 ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq); in pci_probe()
3760 err = fw_card_add(&ohci->card, max_receive, link_speed, guid); in pci_probe()
3768 version >> 16, version & 0xff, ohci->card.index, in pci_probe()
3769 ohci->n_ir, ohci->n_it, ohci->quirks, in pci_probe()
3776 devm_free_irq(&dev->dev, dev->irq, ohci); in pci_probe()
3794 cancel_work_sync(&ohci->bus_reset_work); in pci_remove()
3795 fw_core_remove_card(&ohci->card); in pci_remove()
3804 devm_free_irq(&dev->dev, dev->irq, ohci); in pci_remove()
3807 dev_notice(&dev->dev, "removing fw-ohci device\n"); in pci_remove()
3844 /* Some systems don't set up the GUID register on resume from RAM */
3847 reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid); in pci_resume()
3848 reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32)); in pci_resume()
3851 err = ohci_enable(&ohci->card, NULL, 0); in pci_resume()
3883 return -ENOMEM; in fw_ohci_init()
3901 /* Provide a module alias so root-on-sbp2 initrds don't break. */