Lines Matching +full:revision +full:- +full:id2

1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
13 #include <linux/dma-mapping.h>
15 #include <linux/firewire-constants.h>
43 #include "packet-header-definitions.h"
44 #include "phy-packet-definitions.h"
53 #define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
54 #define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args)
114 * A buffer that contains a block of DMA-able coherent memory used for
134 * List of page-sized buffers for storing DMA descriptors.
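/*
 * Editor's sketch of the structure those two comments belong to, with field
 * names taken from the accessors visible elsewhere in this listing
 * (desc->list, desc->buffer_bus, desc->buffer_size, desc->used,
 * desc->buffer); the exact ordering is an assumption, not driver-verified:
 *
 *	struct descriptor_buffer {
 *		struct list_head list;      // link in the context's buffer list
 *		dma_addr_t buffer_bus;      // DMA address of buffer[]
 *		size_t buffer_size;         // usable bytes in buffer[]
 *		size_t used;                // bytes already handed out
 *		struct descriptor buffer[]; // the DMA descriptors themselves
 *	};
 */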
177 #define CONFIG_ROM_SIZE (CSR_CONFIG_ROM_END - CSR_CONFIG_ROM)
293 return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ); in has_reboot_by_cycle_timer_read_quirk()
307 if (pdev->vendor != PCI_VENDOR_ID_VIA) in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
309 if (pdev->device != PCI_DEVICE_ID_VIA_VT630X) in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
313 pcie_to_pci_bridge = pdev->bus->self; in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
314 if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA) in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
316 if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X) in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
329 unsigned short vendor, device, revision, flags; member
401 ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
403 ", or a combination, or all = -1)");
446 [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W", in log_selfids()
450 [PHY_PACKET_SELF_ID_PORT_STATUS_NCONN] = '-', in log_selfids()
455 .cursor = ohci->self_id_buffer, in log_selfids()
463 self_id_count, generation, ohci->node_id); in log_selfids()
508 [0x00] = "evt_no_status", [0x01] = "-reserved-",
514 [0x0c] = "-reserved-", [0x0d] = "-reserved-",
516 [0x10] = "-reserved-", [0x11] = "ack_complete",
517 [0x12] = "ack_pending ", [0x13] = "-reserved-",
519 [0x16] = "ack_busy_B", [0x17] = "-reserved-",
520 [0x18] = "-reserved-", [0x19] = "-reserved-",
521 [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
522 [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
523 [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
534 [0x3] = "-reserved-", in log_ar_at_event()
543 [0xc] = "-reserved-", in log_ar_at_event()
544 [0xd] = "-reserved-", in log_ar_at_event()
546 [0xf] = "-reserved-", in log_ar_at_event()
598 "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %012llx%s\n", in log_ar_at_event()
605 "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n", in log_ar_at_event()
614 writel(data, ohci->registers + offset); in reg_write()
619 return readl(ohci->registers + offset); in reg_read()
630 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
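/*
 * Editor's sketch of the paged access pattern that comment describes, using
 * the helpers shown below and assuming (per IEEE 1394a) that PHY register 7
 * carries the page select in bits 5..7; an illustration, not the driver's
 * exact code.
 */
static int example_read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret;

	/* ohci->phy_reg_mutex keeps the page select and the access paired */
	ret = read_phy_reg(ohci, 7);
	if (ret < 0)
		return ret;
	ret = write_phy_reg(ohci, 7, (ret & ~(0x7 << 5)) | (page << 5));
	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);	/* addr lies in the paged window */
}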
643 return -ENODEV; /* Card was ejected. */ in read_phy_reg()
658 return -EBUSY; in read_phy_reg()
670 return -ENODEV; /* Card was ejected. */ in write_phy_reg()
681 return -EBUSY; in write_phy_reg()
716 guard(mutex)(&ohci->phy_reg_mutex); in ohci_read_phy_reg()
726 guard(mutex)(&ohci->phy_reg_mutex); in ohci_update_phy_reg()
733 return page_private(ctx->pages[i]); in ar_buffer_bus()
740 d = &ctx->descriptors[index]; in ar_context_link_page()
741 d->branch_address &= cpu_to_le32(~0xf); in ar_context_link_page()
742 d->res_count = cpu_to_le16(PAGE_SIZE); in ar_context_link_page()
743 d->transfer_status = 0; in ar_context_link_page()
746 d = &ctx->descriptors[ctx->last_buffer_index]; in ar_context_link_page()
747 d->branch_address |= cpu_to_le32(1); in ar_context_link_page()
749 ctx->last_buffer_index = index; in ar_context_link_page()
751 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); in ar_context_link_page()
756 struct device *dev = ctx->ohci->card.device; in ar_context_release()
759 if (!ctx->buffer) in ar_context_release()
762 vunmap(ctx->buffer); in ar_context_release()
765 if (ctx->pages[i]) in ar_context_release()
766 dma_free_pages(dev, PAGE_SIZE, ctx->pages[i], in ar_context_release()
773 struct fw_ohci *ohci = ctx->ohci; in ar_context_abort()
775 if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) { in ar_context_abort()
776 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); in ar_context_abort()
791 return ar_next_buffer_index(ctx->last_buffer_index); in ar_first_buffer_index()
801 unsigned int i, next_i, last = ctx->last_buffer_index; in ar_search_last_active_buffer()
805 res_count = READ_ONCE(ctx->descriptors[i].res_count); in ar_search_last_active_buffer()
813 next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); in ar_search_last_active_buffer()
829 next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); in ar_search_last_active_buffer()
844 *buffer_offset = PAGE_SIZE - le16_to_cpu(res_count); in ar_search_last_active_buffer()
861 dma_sync_single_for_cpu(ctx->ohci->card.device, in ar_sync_buffers_for_cpu()
867 dma_sync_single_for_cpu(ctx->ohci->card.device, in ar_sync_buffers_for_cpu()
880 return !!(ohci->quirks & QUIRK_BE_HEADERS); in has_be_header_quirk()
896 struct fw_ohci *ohci = ctx->ohci; in handle_ar_packet()
952 p.ack = evt - 16; in handle_ar_packet()
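		/* Editor's note: the OHCI event codes 0x11..0x1d in the table
		 * above are the IEEE 1394 ack codes offset by 0x10, so
		 * subtracting 16 recovers ack_complete, ack_pending, etc. */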
955 p.generation = ohci->request_generation; in handle_ar_packet()
980 if (!(ohci->quirks & QUIRK_RESET_PACKET)) in handle_ar_packet()
981 ohci->request_generation = (p.header[2] >> 16) & 0xff; in handle_ar_packet()
982 } else if (ctx == &ohci->ar_request_ctx) { in handle_ar_packet()
983 fw_core_handle_request(&ohci->card, &p); in handle_ar_packet()
985 fw_core_handle_response(&ohci->card, &p); in handle_ar_packet()
1011 dma_sync_single_for_device(ctx->ohci->card.device, in ar_recycle_buffers()
1025 p = ctx->pointer; in ar_context_tasklet()
1032 end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset; in ar_context_tasklet()
1041 void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE; in ar_context_tasklet()
1046 p -= AR_BUFFERS * PAGE_SIZE; in ar_context_tasklet()
1056 ctx->pointer = p; in ar_context_tasklet()
1062 ctx->pointer = NULL; in ar_context_tasklet()
1068 struct device *dev = ohci->card.device; in ar_context_init()
1074 ctx->regs = regs; in ar_context_init()
1075 ctx->ohci = ohci; in ar_context_init()
1076 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); in ar_context_init()
1079 ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr, in ar_context_init()
1081 if (!ctx->pages[i]) in ar_context_init()
1083 set_page_private(ctx->pages[i], dma_addr); in ar_context_init()
1089 pages[i] = ctx->pages[i]; in ar_context_init()
1091 pages[AR_BUFFERS + i] = ctx->pages[i]; in ar_context_init()
1092 ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL); in ar_context_init()
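	/* Editor's note on the vmap() above: every page appears twice in
	 * pages[] (again at pages[AR_BUFFERS + i]), so the ring's virtual
	 * mapping repeats past its end and a received packet that wraps
	 * around the ring boundary can be parsed linearly, without copying. */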
1093 if (!ctx->buffer) in ar_context_init()
1096 ctx->descriptors = ohci->misc_buffer + descriptors_offset; in ar_context_init()
1097 ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset; in ar_context_init()
1100 d = &ctx->descriptors[i]; in ar_context_init()
1101 d->req_count = cpu_to_le16(PAGE_SIZE); in ar_context_init()
1102 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | in ar_context_init()
1105 d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i)); in ar_context_init()
1106 d->branch_address = cpu_to_le32(ctx->descriptors_bus + in ar_context_init()
1115 return -ENOMEM; in ar_context_init()
1125 ctx->pointer = ctx->buffer; in ar_context_run()
1127 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1); in ar_context_run()
1128 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN); in ar_context_run()
1135 branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS); in find_branch_descriptor()
1141 return d + z - 1; in find_branch_descriptor()
1151 desc = list_entry(ctx->buffer_list.next, in context_retire_descriptors()
1153 last = ctx->last; in context_retire_descriptors()
1154 while (last->branch_address != 0) { in context_retire_descriptors()
1156 address = le32_to_cpu(last->branch_address); in context_retire_descriptors()
1159 ctx->current_bus = address; in context_retire_descriptors()
1163 if (address < desc->buffer_bus || in context_retire_descriptors()
1164 address >= desc->buffer_bus + desc->used) in context_retire_descriptors()
1165 desc = list_entry(desc->list.next, in context_retire_descriptors()
1167 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d); in context_retire_descriptors()
1170 if (!ctx->callback(ctx, d, last)) in context_retire_descriptors()
1176 old_desc->used = 0; in context_retire_descriptors()
1177 guard(spinlock_irqsave)(&ctx->ohci->lock); in context_retire_descriptors()
1178 list_move_tail(&old_desc->list, &ctx->buffer_list); in context_retire_descriptors()
1180 ctx->last = last; in context_retire_descriptors()
1196 context_retire_descriptors(&isoc_ctx->context); in ohci_isoc_context_work()
1201 * context. Must be called with ohci->lock held.
1211 * program. This will catch run-away userspace or DoS attacks. in context_add_buffer()
1213 if (ctx->total_allocation >= 16*1024*1024) in context_add_buffer()
1214 return -ENOMEM; in context_add_buffer()
1216 desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC); in context_add_buffer()
1218 return -ENOMEM; in context_add_buffer()
1220 offset = (void *)&desc->buffer - (void *)desc; in context_add_buffer()
1222 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads in context_add_buffer()
1223 * for descriptors, even 0x10-byte ones. This can cause page faults when in context_add_buffer()
1227 desc->buffer_size = PAGE_SIZE - offset - 0x10; in context_add_buffer()
1228 desc->buffer_bus = bus_addr + offset; in context_add_buffer()
1229 desc->used = 0; in context_add_buffer()
1231 list_add_tail(&desc->list, &ctx->buffer_list); in context_add_buffer()
1232 ctx->total_allocation += PAGE_SIZE; in context_add_buffer()
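/*
 * Editor's worked example for the 0x10-byte guard above (assuming a 4 KiB
 * page and 0x10-byte descriptors): placed flush against the end of the
 * page, the last descriptor would make a controller that always fetches
 * 0x20 bytes cross into the next page:
 *
 *	last_desc_start = PAGE_SIZE - 0x10;        // 0x0ff0
 *	prefetch_end    = last_desc_start + 0x20;  // 0x1010 > PAGE_SIZE
 *
 * Holding back one extra 0x10-byte slot keeps the prefetch in the page.
 */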
1240 ctx->ohci = ohci; in context_init()
1241 ctx->regs = regs; in context_init()
1242 ctx->total_allocation = 0; in context_init()
1244 INIT_LIST_HEAD(&ctx->buffer_list); in context_init()
1246 return -ENOMEM; in context_init()
1248 ctx->buffer_tail = list_entry(ctx->buffer_list.next, in context_init()
1251 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx); in context_init()
1252 ctx->callback = callback; in context_init()
1259 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer)); in context_init()
1260 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST); in context_init()
1261 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011); in context_init()
1262 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer); in context_init()
1263 ctx->last = ctx->buffer_tail->buffer; in context_init()
1264 ctx->prev = ctx->buffer_tail->buffer; in context_init()
1265 ctx->prev_z = 1; in context_init()
1272 struct fw_card *card = &ctx->ohci->card; in context_release()
1275 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) { in context_release()
1276 dmam_free_coherent(card->device, PAGE_SIZE, desc, in context_release()
1277 desc->buffer_bus - ((void *)&desc->buffer - (void *)desc)); in context_release()
1281 /* Must be called with ohci->lock held */
1286 struct descriptor_buffer *desc = ctx->buffer_tail; in context_get_descriptors()
1288 if (z * sizeof(*d) > desc->buffer_size) in context_get_descriptors()
1291 if (z * sizeof(*d) > desc->buffer_size - desc->used) { in context_get_descriptors()
1295 if (desc->list.next == &ctx->buffer_list) { in context_get_descriptors()
1301 desc = list_entry(desc->list.next, in context_get_descriptors()
1303 ctx->buffer_tail = desc; in context_get_descriptors()
1306 d = desc->buffer + desc->used / sizeof(*d); in context_get_descriptors()
1308 *d_bus = desc->buffer_bus + desc->used; in context_get_descriptors()
1315 struct fw_ohci *ohci = ctx->ohci; in context_run()
1317 reg_write(ohci, COMMAND_PTR(ctx->regs), in context_run()
1318 le32_to_cpu(ctx->last->branch_address)); in context_run()
1319 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0); in context_run()
1320 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra); in context_run()
1321 ctx->running = true; in context_run()
1329 struct descriptor_buffer *desc = ctx->buffer_tail; in context_append()
1332 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); in context_append()
1334 desc->used += (z + extra) * sizeof(*d); in context_append()
1338 d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z); in context_append()
1339 d_branch->branch_address = cpu_to_le32(d_bus | z); in context_append()
1344 * multi-descriptor block starting with an INPUT_MORE, put a copy of in context_append()
1350 if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) && in context_append()
1351 d_branch != ctx->prev && in context_append()
1352 (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) == in context_append()
1354 ctx->prev->branch_address = cpu_to_le32(d_bus | z); in context_append()
1357 ctx->prev = d; in context_append()
1358 ctx->prev_z = z; in context_append()
1363 struct fw_ohci *ohci = ctx->ohci; in context_stop()
1367 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); in context_stop()
1368 ctx->running = false; in context_stop()
1371 reg = reg_read(ohci, CONTROL_SET(ctx->regs)); in context_stop()
1388 * Must always be called with the ohci->lock held to ensure proper
1394 struct fw_ohci *ohci = ctx->ohci; in at_context_queue_packet()
1403 packet->ack = RCODE_SEND_ERROR; in at_context_queue_packet()
1404 return -1; in at_context_queue_packet()
1408 d[0].res_count = cpu_to_le16(packet->timestamp); in at_context_queue_packet()
1410 tcode = async_header_get_tcode(packet->header); in at_context_queue_packet()
1423 ohci1394_at_data_set_speed(header, packet->speed); in at_context_queue_packet()
1424 ohci1394_at_data_set_tlabel(header, async_header_get_tlabel(packet->header)); in at_context_queue_packet()
1425 ohci1394_at_data_set_retry(header, async_header_get_retry(packet->header)); in at_context_queue_packet()
1429 async_header_get_destination(packet->header)); in at_context_queue_packet()
1431 if (ctx == &ctx->ohci->at_response_ctx) { in at_context_queue_packet()
1432 ohci1394_at_data_set_rcode(header, async_header_get_rcode(packet->header)); in at_context_queue_packet()
1435 async_header_get_offset(packet->header)); in at_context_queue_packet()
1439 header[3] = cpu_to_le32(packet->header[3]); in at_context_queue_packet()
1441 header[3] = (__force __le32) packet->header[3]; in at_context_queue_packet()
1443 d[0].req_count = cpu_to_le16(packet->header_length); in at_context_queue_packet()
1446 ohci1394_at_data_set_speed(header, packet->speed); in at_context_queue_packet()
1449 header[1] = cpu_to_le32(packet->header[1]); in at_context_queue_packet()
1450 header[2] = cpu_to_le32(packet->header[2]); in at_context_queue_packet()
1453 if (is_ping_packet(&packet->header[1])) in at_context_queue_packet()
1458 ohci1394_it_data_set_speed(header, packet->speed); in at_context_queue_packet()
1459 ohci1394_it_data_set_tag(header, isoc_header_get_tag(packet->header[0])); in at_context_queue_packet()
1460 ohci1394_it_data_set_channel(header, isoc_header_get_channel(packet->header[0])); in at_context_queue_packet()
1462 ohci1394_it_data_set_sync(header, isoc_header_get_sy(packet->header[0])); in at_context_queue_packet()
1464 ohci1394_it_data_set_data_length(header, isoc_header_get_data_length(packet->header[0])); in at_context_queue_packet()
1471 packet->ack = RCODE_SEND_ERROR; in at_context_queue_packet()
1472 return -1; in at_context_queue_packet()
1477 driver_data->packet = packet; in at_context_queue_packet()
1478 packet->driver_data = driver_data; in at_context_queue_packet()
1480 if (packet->payload_length > 0) { in at_context_queue_packet()
1481 if (packet->payload_length > sizeof(driver_data->inline_data)) { in at_context_queue_packet()
1482 payload_bus = dma_map_single(ohci->card.device, in at_context_queue_packet()
1483 packet->payload, in at_context_queue_packet()
1484 packet->payload_length, in at_context_queue_packet()
1486 if (dma_mapping_error(ohci->card.device, payload_bus)) { in at_context_queue_packet()
1487 packet->ack = RCODE_SEND_ERROR; in at_context_queue_packet()
1488 return -1; in at_context_queue_packet()
1490 packet->payload_bus = payload_bus; in at_context_queue_packet()
1491 packet->payload_mapped = true; in at_context_queue_packet()
1493 memcpy(driver_data->inline_data, packet->payload, in at_context_queue_packet()
1494 packet->payload_length); in at_context_queue_packet()
1498 d[2].req_count = cpu_to_le16(packet->payload_length); in at_context_queue_packet()
1507 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | in at_context_queue_packet()
1512 if (ohci->generation != packet->generation) { in at_context_queue_packet()
1513 if (packet->payload_mapped) in at_context_queue_packet()
1514 dma_unmap_single(ohci->card.device, payload_bus, in at_context_queue_packet()
1515 packet->payload_length, DMA_TO_DEVICE); in at_context_queue_packet()
1516 packet->ack = RCODE_GENERATION; in at_context_queue_packet()
1517 return -1; in at_context_queue_packet()
1520 context_append(ctx, d, z, 4 - z); in at_context_queue_packet()
1522 if (ctx->running) in at_context_queue_packet()
1523 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); in at_context_queue_packet()
1532 tasklet_disable(&ctx->tasklet); in at_context_flush()
1534 ctx->flushing = true; in at_context_flush()
1536 ctx->flushing = false; in at_context_flush()
1538 tasklet_enable(&ctx->tasklet); in at_context_flush()
1547 struct fw_ohci *ohci = context->ohci; in handle_at_packet()
1550 if (last->transfer_status == 0 && !context->flushing) in handle_at_packet()
1555 packet = driver_data->packet; in handle_at_packet()
1560 if (packet->payload_mapped) in handle_at_packet()
1561 dma_unmap_single(ohci->card.device, packet->payload_bus, in handle_at_packet()
1562 packet->payload_length, DMA_TO_DEVICE); in handle_at_packet()
1564 evt = le16_to_cpu(last->transfer_status) & 0x1f; in handle_at_packet()
1565 packet->timestamp = le16_to_cpu(last->res_count); in handle_at_packet()
1567 log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt); in handle_at_packet()
1572 packet->ack = RCODE_CANCELLED; in handle_at_packet()
1580 packet->ack = RCODE_GENERATION; in handle_at_packet()
1584 if (context->flushing) in handle_at_packet()
1585 packet->ack = RCODE_GENERATION; in handle_at_packet()
1591 packet->ack = RCODE_NO_ACK; in handle_at_packet()
1602 packet->ack = evt - 0x10; in handle_at_packet()
1606 if (context->flushing) { in handle_at_packet()
1607 packet->ack = RCODE_GENERATION; in handle_at_packet()
1613 packet->ack = RCODE_SEND_ERROR; in handle_at_packet()
1617 packet->callback(packet, &ohci->card, packet->ack); in handle_at_packet()
1630 tcode = async_header_get_tcode(packet->header); in handle_local_rom()
1632 length = async_header_get_data_length(packet->header); in handle_local_rom()
1636 i = csr - CSR_CONFIG_ROM; in handle_local_rom()
1638 fw_fill_response(&response, packet->header, in handle_local_rom()
1641 fw_fill_response(&response, packet->header, in handle_local_rom()
1644 fw_fill_response(&response, packet->header, RCODE_COMPLETE, in handle_local_rom()
1645 (void *) ohci->config_rom + i, length); in handle_local_rom()
1650 fw_core_handle_response(&ohci->card, &response); in handle_local_rom()
1661 tcode = async_header_get_tcode(packet->header); in handle_local_lock()
1662 length = async_header_get_data_length(packet->header); in handle_local_lock()
1663 payload = packet->payload; in handle_local_lock()
1664 ext_tcode = async_header_get_extended_tcode(packet->header); in handle_local_lock()
1674 fw_fill_response(&response, packet->header, in handle_local_lock()
1679 sel = (csr - CSR_BUS_MANAGER_ID) / 4; in handle_local_lock()
1688 fw_fill_response(&response, packet->header, in handle_local_lock()
1695 fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); in handle_local_lock()
1700 fw_core_handle_response(&ohci->card, &response); in handle_local_lock()
1707 if (ctx == &ctx->ohci->at_request_ctx) { in handle_local_request()
1708 packet->ack = ACK_PENDING; in handle_local_request()
1709 packet->callback(packet, &ctx->ohci->card, packet->ack); in handle_local_request()
1712 offset = async_header_get_offset(packet->header); in handle_local_request()
1713 csr = offset - CSR_REGISTER_BASE; in handle_local_request()
1717 handle_local_rom(ctx->ohci, packet, csr); in handle_local_request()
1723 handle_local_lock(ctx->ohci, packet, csr); in handle_local_request()
1726 if (ctx == &ctx->ohci->at_request_ctx) in handle_local_request()
1727 fw_core_handle_request(&ctx->ohci->card, packet); in handle_local_request()
1729 fw_core_handle_response(&ctx->ohci->card, packet); in handle_local_request()
1733 if (ctx == &ctx->ohci->at_response_ctx) { in handle_local_request()
1734 packet->ack = ACK_COMPLETE; in handle_local_request()
1735 packet->callback(packet, &ctx->ohci->card, packet->ack); in handle_local_request()
1744 spin_lock_irqsave(&ctx->ohci->lock, flags); in at_context_transmit()
1746 if (async_header_get_destination(packet->header) == ctx->ohci->node_id && in at_context_transmit()
1747 ctx->ohci->generation == packet->generation) { in at_context_transmit()
1748 spin_unlock_irqrestore(&ctx->ohci->lock, flags); in at_context_transmit()
1751 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci)); in at_context_transmit()
1758 spin_unlock_irqrestore(&ctx->ohci->lock, flags); in at_context_transmit()
1762 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci)); in at_context_transmit()
1764 packet->callback(packet, &ctx->ohci->card, packet->ack); in at_context_transmit()
1789 if (!(ohci->it_context_support & (1 << i))) in handle_dead_contexts()
1795 if (!(ohci->ir_context_support & (1 << i))) in handle_dead_contexts()
1817 * - When the lowest six bits are wrapping around to zero, a read that happens
1819 * - When the cycleOffset field wraps around to zero, the cycleCount field is
1821 * - Occasionally, the entire register reads zero.
1841 if (ohci->quirks & QUIRK_CYCLE_TIMER) { in get_cycle_time()
1852 diff01 = t1 - t0; in get_cycle_time()
1853 diff12 = t2 - t1; in get_cycle_time()
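/*
 * Editor's sketch of the QUIRK_CYCLE_TIMER workaround built from the
 * fragments above (cycle_timer_ticks() is assumed to flatten the packed
 * seconds/count/offset fields into a monotonic tick count; this is an
 * illustration, not the driver's exact loop): sample the register three
 * times and trust the middle value only once both deltas are positive.
 */
static u32 example_get_stable_cycle_time(struct fw_ohci *ohci)
{
	u32 c0, c1, c2;
	int tries = 0;

	c0 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
	c1 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
	do {
		c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		if ((s32)(cycle_timer_ticks(c1) - cycle_timer_ticks(c0)) > 0 &&
		    (s32)(cycle_timer_ticks(c2) - cycle_timer_ticks(c1)) > 0)
			return c1;	/* middle sample is consistent */
		c0 = c1;
		c1 = c2;
	} while (++tries < 20);

	return c1;	/* give up after a bounded number of retries */
}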
1872 if (unlikely(!ohci->bus_time_running)) { in update_bus_time()
1874 ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) | in update_bus_time()
1876 ohci->bus_time_running = true; in update_bus_time()
1879 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) in update_bus_time()
1880 ohci->bus_time += 0x40; in update_bus_time()
1882 return ohci->bus_time | cycle_time_seconds; in update_bus_time()
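/*
 * Editor's worked example for the 64-second extension above: the hardware
 * seconds field is only 7 bits wide, so bit 6 (0x40) flips every 64 s.
 * When the cached bus_time disagrees with the hardware in that bit, a 64 s
 * boundary has passed and bus_time advances by 0x40 (carrying into the
 * upper bits on wrap):
 *
 *	bus_time = 0x080, seconds = 0x41  ->  bit 6 differs
 *	bus_time += 0x40  ->  0x0c0; returned value = 0x0c0 | 0x41 = 0x0c1
 *	later: seconds wraps to 0x01      ->  bit 6 differs again
 *	bus_time += 0x40  ->  0x100; returned value = 0x100 | 0x01 = 0x101
 */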
1890 scoped_guard(mutex, &ohci->phy_reg_mutex) { in get_status_for_port()
1925 u32 entry = ohci->self_id_buffer[i]; in get_self_id_pos()
1929 return -1; in get_self_id_pos()
1940 guard(mutex)(&ohci->phy_reg_mutex); in detect_initiated_reset()
1989 return -EBUSY; in find_and_insert_self_id()
1993 reg = ohci_read_phy_reg(&ohci->card, 4); in find_and_insert_self_id()
1998 reg = ohci_read_phy_reg(&ohci->card, 1); in find_and_insert_self_id()
2020 memmove(&(ohci->self_id_buffer[pos+1]), in find_and_insert_self_id()
2021 &(ohci->self_id_buffer[pos]), in find_and_insert_self_id()
2022 (self_id_count - pos) * sizeof(*ohci->self_id_buffer)); in find_and_insert_self_id()
2023 ohci->self_id_buffer[pos] = self_id; in find_and_insert_self_id()
2049 ohci->node_id = reg & (OHCI1394_NodeID_busNumber | in bus_reset_work()
2053 if (!(ohci->is_root && is_new_root)) in bus_reset_work()
2056 ohci->is_root = is_new_root; in bus_reset_work()
2076 quadlet = cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci)); in bus_reset_work()
2081 u32 id = cond_le32_to_cpu(ohci->self_id[i], has_be_header_quirk(ohci)); in bus_reset_work()
2082 u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1], has_be_header_quirk(ohci)); in bus_reset_work() local
2084 if (id != ~id2) { in bus_reset_work()
2099 j, self_id_count, id, id2); in bus_reset_work()
2102 ohci->self_id_buffer[j] = id; in bus_reset_work()
2105 if (ohci->quirks & QUIRK_TI_SLLZ059) { in bus_reset_work()
2142 scoped_guard(spinlock_irq, &ohci->lock) { in bus_reset_work()
2143 ohci->generation = -1; // prevent AT packet queueing in bus_reset_work()
2144 context_stop(&ohci->at_request_ctx); in bus_reset_work()
2145 context_stop(&ohci->at_response_ctx); in bus_reset_work()
2153 at_context_flush(&ohci->at_request_ctx); in bus_reset_work()
2154 at_context_flush(&ohci->at_response_ctx); in bus_reset_work()
2156 scoped_guard(spinlock_irq, &ohci->lock) { in bus_reset_work()
2157 ohci->generation = generation; in bus_reset_work()
2161 if (ohci->quirks & QUIRK_RESET_PACKET) in bus_reset_work()
2162 ohci->request_generation = generation; in bus_reset_work()
2169 if (ohci->next_config_rom != NULL) { in bus_reset_work()
2170 if (ohci->next_config_rom != ohci->config_rom) { in bus_reset_work()
2171 free_rom = ohci->config_rom; in bus_reset_work()
2172 free_rom_bus = ohci->config_rom_bus; in bus_reset_work()
2174 ohci->config_rom = ohci->next_config_rom; in bus_reset_work()
2175 ohci->config_rom_bus = ohci->next_config_rom_bus; in bus_reset_work()
2176 ohci->next_config_rom = NULL; in bus_reset_work()
2181 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(ohci->config_rom[2])); in bus_reset_work()
2182 ohci->config_rom[0] = ohci->next_header; in bus_reset_work()
2183 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(ohci->next_header)); in bus_reset_work()
2193 dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus); in bus_reset_work()
2197 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, in bus_reset_work()
2198 self_id_count, ohci->self_id_buffer, in bus_reset_work()
2199 ohci->csr_state_setclear_abdicate); in bus_reset_work()
2200 ohci->csr_state_setclear_abdicate = false; in bus_reset_work()
2215 dev_notice_ratelimited(ohci->card.device, in irq_handler()
2225 trace_irqs(ohci->card.index, event); in irq_handler()
2235 trace_self_id_complete(ohci->card.index, reg, ohci->self_id, in irq_handler()
2238 queue_work(selfid_workqueue, &ohci->bus_reset_work); in irq_handler()
2242 tasklet_schedule(&ohci->ar_request_ctx.tasklet); in irq_handler()
2245 tasklet_schedule(&ohci->ar_response_ctx.tasklet); in irq_handler()
2248 tasklet_schedule(&ohci->at_request_ctx.tasklet); in irq_handler()
2251 tasklet_schedule(&ohci->at_response_ctx.tasklet); in irq_handler()
2258 i = ffs(iso_event) - 1; in irq_handler()
2259 fw_iso_context_schedule_flush_completions(&ohci->ir_context_list[i].base); in irq_handler()
2269 i = ffs(iso_event) - 1; in irq_handler()
2270 fw_iso_context_schedule_flush_completions(&ohci->it_context_list[i].base); in irq_handler()
2283 dev_err_ratelimited(ohci->card.device, "PCI posted write error\n"); in irq_handler()
2287 dev_notice_ratelimited(ohci->card.device, "isochronous cycle too long\n"); in irq_handler()
2299 dev_notice_ratelimited(ohci->card.device, "isochronous cycle inconsistent\n"); in irq_handler()
2306 guard(spinlock)(&ohci->lock); in irq_handler()
2323 return -ENODEV; /* Card was ejected. */ in software_reset()
2331 return -EBUSY; in software_reset()
2340 memset(&dest[length], 0, CONFIG_ROM_SIZE - size); in copy_config_rom()
2366 if (ohci->quirks & QUIRK_NO_1394A) in configure_1394a_enhancements()
2406 for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) { in probe_tsb41ba3d()
2455 return -EIO; in ohci_enable()
2458 if (ohci->quirks & QUIRK_TI_SLLZ059) { in ohci_enable()
2465 ohci->quirks &= ~QUIRK_TI_SLLZ059; in ohci_enable()
2471 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); in ohci_enable()
2482 ohci->bus_time_running = false; in ohci_enable()
2485 if (ohci->ir_context_support & (1 << i)) in ohci_enable()
2493 card->broadcast_channel_auto_allocated = true; in ohci_enable()
2498 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; in ohci_enable()
2500 card->priority_budget_implemented = ohci->pri_req_max != 0; in ohci_enable()
2521 * link, so we have a valid config rom before enabling - the in ohci_enable()
2535 ohci->next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, in ohci_enable()
2536 &ohci->next_config_rom_bus, GFP_KERNEL); in ohci_enable()
2537 if (ohci->next_config_rom == NULL) in ohci_enable()
2538 return -ENOMEM; in ohci_enable()
2540 copy_config_rom(ohci->next_config_rom, config_rom, length); in ohci_enable()
2546 ohci->next_config_rom = ohci->config_rom; in ohci_enable()
2547 ohci->next_config_rom_bus = ohci->config_rom_bus; in ohci_enable()
2550 ohci->next_header = ohci->next_config_rom[0]; in ohci_enable()
2551 ohci->next_config_rom[0] = 0; in ohci_enable()
2554 be32_to_cpu(ohci->next_config_rom[2])); in ohci_enable()
2555 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); in ohci_enable()
2580 ar_context_run(&ohci->ar_request_ctx); in ohci_enable()
2581 ar_context_run(&ohci->ar_response_ctx); in ohci_enable()
2586 fw_schedule_bus_reset(&ohci->card, false, true); in ohci_enable()
2623 * We use ohci->lock to avoid racing with the code that sets in ohci_set_config_rom()
2624 * ohci->next_config_rom to NULL (see bus_reset_work). in ohci_set_config_rom()
2627 next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, in ohci_set_config_rom()
2630 return -ENOMEM; in ohci_set_config_rom()
2632 scoped_guard(spinlock_irq, &ohci->lock) { in ohci_set_config_rom()
2634 // into the ohci->next_config_rom and then mark the local variable as null so that in ohci_set_config_rom()
2639 if (ohci->next_config_rom == NULL) { in ohci_set_config_rom()
2640 ohci->next_config_rom = next_config_rom; in ohci_set_config_rom()
2641 ohci->next_config_rom_bus = next_config_rom_bus; in ohci_set_config_rom()
2645 copy_config_rom(ohci->next_config_rom, config_rom, length); in ohci_set_config_rom()
2647 ohci->next_header = config_rom[0]; in ohci_set_config_rom()
2648 ohci->next_config_rom[0] = 0; in ohci_set_config_rom()
2650 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); in ohci_set_config_rom()
2655 dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom, in ohci_set_config_rom()
2667 fw_schedule_bus_reset(&ohci->card, true, true); in ohci_set_config_rom()
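/*
 * Editor's summary of the atomic update protocol shown above and in
 * bus_reset_work() (an explanatory note, not driver code):
 *
 *   1. A new coherent buffer is allocated outside the lock.
 *   2. Under ohci->lock it is published as ohci->next_config_rom and
 *      OHCI1394_ConfigROMmap is pointed at it; the controller only latches
 *      the new mapping at the next bus reset.
 *   3. bus_reset_work() then promotes next_config_rom to config_rom and
 *      frees the old buffer, so remote nodes never read a half-updated
 *      ROM image.
 */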
2676 at_context_transmit(&ohci->at_request_ctx, packet); in ohci_send_request()
2683 at_context_transmit(&ohci->at_response_ctx, packet); in ohci_send_response()
2689 struct context *ctx = &ohci->at_request_ctx; in ohci_cancel_packet()
2690 struct driver_data *driver_data = packet->driver_data; in ohci_cancel_packet()
2691 int ret = -ENOENT; in ohci_cancel_packet()
2693 tasklet_disable_in_atomic(&ctx->tasklet); in ohci_cancel_packet()
2695 if (packet->ack != 0) in ohci_cancel_packet()
2698 if (packet->payload_mapped) in ohci_cancel_packet()
2699 dma_unmap_single(ohci->card.device, packet->payload_bus, in ohci_cancel_packet()
2700 packet->payload_length, DMA_TO_DEVICE); in ohci_cancel_packet()
2702 log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20); in ohci_cancel_packet()
2703 driver_data->packet = NULL; in ohci_cancel_packet()
2704 packet->ack = RCODE_CANCELLED; in ohci_cancel_packet()
2707 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci)); in ohci_cancel_packet()
2709 packet->callback(packet, &ohci->card, packet->ack); in ohci_cancel_packet()
2712 tasklet_enable(&ctx->tasklet); in ohci_cancel_packet()
2731 guard(spinlock_irqsave)(&ohci->lock); in ohci_enable_phys_dma()
2733 if (ohci->generation != generation) in ohci_enable_phys_dma()
2734 return -ESTALE; in ohci_enable_phys_dma()
2737 * Note, if the node ID contains a non-local bus ID, physical DMA is in ohci_enable_phys_dma()
2745 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); in ohci_enable_phys_dma()
2760 if (ohci->is_root && in ohci_read_csr()
2766 if (ohci->csr_state_setclear_abdicate) in ohci_read_csr()
2783 guard(spinlock_irqsave)(&ohci->lock); in ohci_read_csr()
2792 (ohci->pri_req_max << 8); in ohci_read_csr()
2806 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { in ohci_write_csr()
2812 ohci->csr_state_setclear_abdicate = false; in ohci_write_csr()
2816 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { in ohci_write_csr()
2822 ohci->csr_state_setclear_abdicate = true; in ohci_write_csr()
2839 guard(spinlock_irqsave)(&ohci->lock); in ohci_write_csr()
2840 ohci->bus_time = (update_bus_time(ohci) & 0x40) | (value & ~0x7f); in ohci_write_csr()
2863 trace_isoc_inbound_single_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header, in flush_iso_completions()
2864 ctx->header_length); in flush_iso_completions()
2865 trace_isoc_outbound_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header, in flush_iso_completions()
2866 ctx->header_length); in flush_iso_completions()
2868 ctx->base.callback.sc(&ctx->base, ctx->last_timestamp, in flush_iso_completions()
2869 ctx->header_length, ctx->header, in flush_iso_completions()
2870 ctx->base.callback_data); in flush_iso_completions()
2871 ctx->header_length = 0; in flush_iso_completions()
2878 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) { in copy_iso_headers()
2879 if (ctx->base.drop_overflow_headers) in copy_iso_headers()
2884 ctx_hdr = ctx->header + ctx->header_length; in copy_iso_headers()
2885 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]); in copy_iso_headers()
2892 if (ctx->base.header_size > 0) in copy_iso_headers()
2894 if (ctx->base.header_size > 4) in copy_iso_headers()
2896 if (ctx->base.header_size > 8) in copy_iso_headers()
2897 memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8); in copy_iso_headers()
2898 ctx->header_length += ctx->base.header_size; in copy_iso_headers()
2911 if (pd->transfer_status) in handle_ir_packet_per_buffer()
2917 while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) { in handle_ir_packet_per_buffer()
2919 buffer_dma = le32_to_cpu(d->data_address); in handle_ir_packet_per_buffer()
2920 dma_sync_single_range_for_cpu(context->ohci->card.device, in handle_ir_packet_per_buffer()
2923 le16_to_cpu(d->req_count), in handle_ir_packet_per_buffer()
2929 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) in handle_ir_packet_per_buffer()
2945 req_count = le16_to_cpu(last->req_count); in handle_ir_buffer_fill()
2946 res_count = le16_to_cpu(READ_ONCE(last->res_count)); in handle_ir_buffer_fill()
2947 completed = req_count - res_count; in handle_ir_buffer_fill()
2948 buffer_dma = le32_to_cpu(last->data_address); in handle_ir_buffer_fill()
2951 ctx->mc_buffer_bus = buffer_dma; in handle_ir_buffer_fill()
2952 ctx->mc_completed = completed; in handle_ir_buffer_fill()
2959 dma_sync_single_range_for_cpu(context->ohci->card.device, in handle_ir_buffer_fill()
2964 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) { in handle_ir_buffer_fill()
2965 trace_isoc_inbound_multiple_completions(&ctx->base, completed, in handle_ir_buffer_fill()
2968 ctx->base.callback.mc(&ctx->base, in handle_ir_buffer_fill()
2970 ctx->base.callback_data); in handle_ir_buffer_fill()
2971 ctx->mc_completed = 0; in handle_ir_buffer_fill()
2979 dma_sync_single_range_for_cpu(ctx->context.ohci->card.device, in flush_ir_buffer_fill()
2980 ctx->mc_buffer_bus & PAGE_MASK, in flush_ir_buffer_fill()
2981 ctx->mc_buffer_bus & ~PAGE_MASK, in flush_ir_buffer_fill()
2982 ctx->mc_completed, DMA_FROM_DEVICE); in flush_ir_buffer_fill()
2984 trace_isoc_inbound_multiple_completions(&ctx->base, ctx->mc_completed, in flush_ir_buffer_fill()
2987 ctx->base.callback.mc(&ctx->base, in flush_ir_buffer_fill()
2988 ctx->mc_buffer_bus + ctx->mc_completed, in flush_ir_buffer_fill()
2989 ctx->base.callback_data); in flush_ir_buffer_fill()
2990 ctx->mc_completed = 0; in flush_ir_buffer_fill()
3000 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) in sync_it_packet_for_cpu()
3011 if ((le32_to_cpu(pd->data_address) & PAGE_MASK) == in sync_it_packet_for_cpu()
3012 (context->current_bus & PAGE_MASK)) { in sync_it_packet_for_cpu()
3013 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) in sync_it_packet_for_cpu()
3019 buffer_dma = le32_to_cpu(pd->data_address); in sync_it_packet_for_cpu()
3020 dma_sync_single_range_for_cpu(context->ohci->card.device, in sync_it_packet_for_cpu()
3023 le16_to_cpu(pd->req_count), in sync_it_packet_for_cpu()
3025 control = pd->control; in sync_it_packet_for_cpu()
3040 if (pd->transfer_status) in handle_it_packet()
3048 if (ctx->header_length + 4 > PAGE_SIZE) { in handle_it_packet()
3049 if (ctx->base.drop_overflow_headers) in handle_it_packet()
3054 ctx_hdr = ctx->header + ctx->header_length; in handle_it_packet()
3055 ctx->last_timestamp = le16_to_cpu(last->res_count); in handle_it_packet()
3056 /* Present this value as big-endian to match the receive code */ in handle_it_packet()
3057 *ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) | in handle_it_packet()
3058 le16_to_cpu(pd->res_count)); in handle_it_packet()
3059 ctx->header_length += 4; in handle_it_packet()
3061 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) in handle_it_packet()
3075 ohci->mc_channels = channels; in set_multichannel_mask()
3086 int index, ret = -EBUSY; in ohci_allocate_iso_context()
3088 scoped_guard(spinlock_irq, &ohci->lock) { in ohci_allocate_iso_context()
3091 mask = &ohci->it_context_mask; in ohci_allocate_iso_context()
3093 index = ffs(*mask) - 1; in ohci_allocate_iso_context()
3097 ctx = &ohci->it_context_list[index]; in ohci_allocate_iso_context()
3102 channels = &ohci->ir_context_channels; in ohci_allocate_iso_context()
3103 mask = &ohci->ir_context_mask; in ohci_allocate_iso_context()
3105 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; in ohci_allocate_iso_context()
3110 ctx = &ohci->ir_context_list[index]; in ohci_allocate_iso_context()
3115 mask = &ohci->ir_context_mask; in ohci_allocate_iso_context()
3117 index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1; in ohci_allocate_iso_context()
3119 ohci->mc_allocated = true; in ohci_allocate_iso_context()
3122 ctx = &ohci->ir_context_list[index]; in ohci_allocate_iso_context()
3127 index = -1; in ohci_allocate_iso_context()
3128 ret = -ENOSYS; in ohci_allocate_iso_context()
3136 ctx->header_length = 0; in ohci_allocate_iso_context()
3137 ctx->header = (void *) __get_free_page(GFP_KERNEL); in ohci_allocate_iso_context()
3138 if (ctx->header == NULL) { in ohci_allocate_iso_context()
3139 ret = -ENOMEM; in ohci_allocate_iso_context()
3142 ret = context_init(&ctx->context, ohci, regs, callback); in ohci_allocate_iso_context()
3145 fw_iso_context_init_work(&ctx->base, ohci_isoc_context_work); in ohci_allocate_iso_context()
3149 ctx->mc_completed = 0; in ohci_allocate_iso_context()
3152 return &ctx->base; in ohci_allocate_iso_context()
3155 free_page((unsigned long)ctx->header); in ohci_allocate_iso_context()
3157 scoped_guard(spinlock_irq, &ohci->lock) { in ohci_allocate_iso_context()
3164 ohci->mc_allocated = false; in ohci_allocate_iso_context()
3177 struct fw_ohci *ohci = ctx->context.ohci; in ohci_start_iso()
3182 if (ctx->context.last->branch_address == 0) in ohci_start_iso()
3183 return -ENODATA; in ohci_start_iso()
3185 switch (ctx->base.type) { in ohci_start_iso()
3187 index = ctx - ohci->it_context_list; in ohci_start_iso()
3195 context_run(&ctx->context, match); in ohci_start_iso()
3202 index = ctx - ohci->ir_context_list; in ohci_start_iso()
3203 match = (tags << 28) | (sync << 8) | ctx->base.channel; in ohci_start_iso()
3211 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); in ohci_start_iso()
3212 context_run(&ctx->context, control); in ohci_start_iso()
3214 ctx->sync = sync; in ohci_start_iso()
3215 ctx->tags = tags; in ohci_start_iso()
3225 struct fw_ohci *ohci = fw_ohci(base->card); in ohci_stop_iso()
3229 switch (ctx->base.type) { in ohci_stop_iso()
3231 index = ctx - ohci->it_context_list; in ohci_stop_iso()
3237 index = ctx - ohci->ir_context_list; in ohci_stop_iso()
3242 context_stop(&ctx->context); in ohci_stop_iso()
3249 struct fw_ohci *ohci = fw_ohci(base->card); in ohci_free_iso_context()
3254 context_release(&ctx->context); in ohci_free_iso_context()
3255 free_page((unsigned long)ctx->header); in ohci_free_iso_context()
3257 guard(spinlock_irqsave)(&ohci->lock); in ohci_free_iso_context()
3259 switch (base->type) { in ohci_free_iso_context()
3261 index = ctx - ohci->it_context_list; in ohci_free_iso_context()
3262 ohci->it_context_mask |= 1 << index; in ohci_free_iso_context()
3266 index = ctx - ohci->ir_context_list; in ohci_free_iso_context()
3267 ohci->ir_context_mask |= 1 << index; in ohci_free_iso_context()
3268 ohci->ir_context_channels |= 1ULL << base->channel; in ohci_free_iso_context()
3272 index = ctx - ohci->ir_context_list; in ohci_free_iso_context()
3273 ohci->ir_context_mask |= 1 << index; in ohci_free_iso_context()
3274 ohci->ir_context_channels |= ohci->mc_channels; in ohci_free_iso_context()
3275 ohci->mc_channels = 0; in ohci_free_iso_context()
3276 ohci->mc_allocated = false; in ohci_free_iso_context()
3283 struct fw_ohci *ohci = fw_ohci(base->card); in ohci_set_iso_channels()
3285 switch (base->type) { in ohci_set_iso_channels()
3288 guard(spinlock_irqsave)(&ohci->lock); in ohci_set_iso_channels()
3291 if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { in ohci_set_iso_channels()
3292 *channels = ohci->ir_context_channels; in ohci_set_iso_channels()
3293 return -EBUSY; in ohci_set_iso_channels()
3300 return -EINVAL; in ohci_set_iso_channels()
3310 for (i = 0 ; i < ohci->n_ir ; i++) { in ohci_resume_iso_dma()
3311 ctx = &ohci->ir_context_list[i]; in ohci_resume_iso_dma()
3312 if (ctx->context.running) in ohci_resume_iso_dma()
3313 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); in ohci_resume_iso_dma()
3316 for (i = 0 ; i < ohci->n_it ; i++) { in ohci_resume_iso_dma()
3317 ctx = &ohci->it_context_list[i]; in ohci_resume_iso_dma()
3318 if (ctx->context.running) in ohci_resume_iso_dma()
3319 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); in ohci_resume_iso_dma()
3340 if (p->skip) in queue_iso_transmit()
3344 if (p->header_length > 0) in queue_iso_transmit()
3348 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT; in queue_iso_transmit()
3349 if (p->payload_length > 0) in queue_iso_transmit()
3350 payload_z = end_page - (payload_index >> PAGE_SHIFT); in queue_iso_transmit()
3357 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d)); in queue_iso_transmit()
3359 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus); in queue_iso_transmit()
3361 return -ENOMEM; in queue_iso_transmit()
3363 if (!p->skip) { in queue_iso_transmit()
3371 * FIXME: Make the context's cycle-lost behaviour configurable? in queue_iso_transmit()
3377 ohci1394_it_data_set_speed(header, ctx->base.speed); in queue_iso_transmit()
3378 ohci1394_it_data_set_tag(header, p->tag); in queue_iso_transmit()
3379 ohci1394_it_data_set_channel(header, ctx->base.channel); in queue_iso_transmit()
3381 ohci1394_it_data_set_sync(header, p->sy); in queue_iso_transmit()
3383 ohci1394_it_data_set_data_length(header, p->header_length + p->payload_length); in queue_iso_transmit()
3386 if (p->header_length > 0) { in queue_iso_transmit()
3387 d[2].req_count = cpu_to_le16(p->header_length); in queue_iso_transmit()
3389 memcpy(&d[z], p->header, p->header_length); in queue_iso_transmit()
3392 pd = d + z - payload_z; in queue_iso_transmit()
3393 payload_end_index = payload_index + p->payload_length; in queue_iso_transmit()
3399 min(next_page_index, payload_end_index) - payload_index; in queue_iso_transmit()
3402 page_bus = page_private(buffer->pages[page]); in queue_iso_transmit()
3405 dma_sync_single_range_for_device(ctx->context.ohci->card.device, in queue_iso_transmit()
3412 if (p->interrupt) in queue_iso_transmit()
3417 last = z == 2 ? d : d + z - 1; in queue_iso_transmit()
3418 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | in queue_iso_transmit()
3423 context_append(&ctx->context, d, z, header_z); in queue_iso_transmit()
3433 struct device *device = ctx->context.ohci->card.device; in queue_iso_packet_per_buffer()
3444 packet_count = packet->header_length / ctx->base.header_size; in queue_iso_packet_per_buffer()
3445 header_size = max(ctx->base.header_size, (size_t)8); in queue_iso_packet_per_buffer()
3451 payload_per_buffer = packet->payload_length / packet_count; in queue_iso_packet_per_buffer()
3456 d = context_get_descriptors(&ctx->context, in queue_iso_packet_per_buffer()
3459 return -ENOMEM; in queue_iso_packet_per_buffer()
3461 d->control = cpu_to_le16(DESCRIPTOR_STATUS | in queue_iso_packet_per_buffer()
3463 if (packet->skip && i == 0) in queue_iso_packet_per_buffer()
3464 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); in queue_iso_packet_per_buffer()
3465 d->req_count = cpu_to_le16(header_size); in queue_iso_packet_per_buffer()
3466 d->res_count = d->req_count; in queue_iso_packet_per_buffer()
3467 d->transfer_status = 0; in queue_iso_packet_per_buffer()
3468 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d))); in queue_iso_packet_per_buffer()
3474 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | in queue_iso_packet_per_buffer()
3480 length = PAGE_SIZE - offset; in queue_iso_packet_per_buffer()
3481 pd->req_count = cpu_to_le16(length); in queue_iso_packet_per_buffer()
3482 pd->res_count = pd->req_count; in queue_iso_packet_per_buffer()
3483 pd->transfer_status = 0; in queue_iso_packet_per_buffer()
3485 page_bus = page_private(buffer->pages[page]); in queue_iso_packet_per_buffer()
3486 pd->data_address = cpu_to_le32(page_bus + offset); in queue_iso_packet_per_buffer()
3493 rest -= length; in queue_iso_packet_per_buffer()
3497 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | in queue_iso_packet_per_buffer()
3500 if (packet->interrupt && i == packet_count - 1) in queue_iso_packet_per_buffer()
3501 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); in queue_iso_packet_per_buffer()
3503 context_append(&ctx->context, d, z, header_z); in queue_iso_packet_per_buffer()
3520 rest = packet->payload_length; in queue_iso_buffer_fill()
3525 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count)) in queue_iso_buffer_fill()
3526 return -EFAULT; in queue_iso_buffer_fill()
3529 d = context_get_descriptors(&ctx->context, 1, &d_bus); in queue_iso_buffer_fill()
3531 return -ENOMEM; in queue_iso_buffer_fill()
3533 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | in queue_iso_buffer_fill()
3535 if (packet->skip && i == 0) in queue_iso_buffer_fill()
3536 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); in queue_iso_buffer_fill()
3537 if (packet->interrupt && i == z - 1) in queue_iso_buffer_fill()
3538 d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); in queue_iso_buffer_fill()
3543 length = PAGE_SIZE - offset; in queue_iso_buffer_fill()
3544 d->req_count = cpu_to_le16(length); in queue_iso_buffer_fill()
3545 d->res_count = d->req_count; in queue_iso_buffer_fill()
3546 d->transfer_status = 0; in queue_iso_buffer_fill()
3548 page_bus = page_private(buffer->pages[page]); in queue_iso_buffer_fill()
3549 d->data_address = cpu_to_le32(page_bus + offset); in queue_iso_buffer_fill()
3551 dma_sync_single_range_for_device(ctx->context.ohci->card.device, in queue_iso_buffer_fill()
3555 rest -= length; in queue_iso_buffer_fill()
3559 context_append(&ctx->context, d, 1, 0); in queue_iso_buffer_fill()
3572 guard(spinlock_irqsave)(&ctx->context.ohci->lock); in ohci_queue_iso()
3574 switch (base->type) { in ohci_queue_iso()
3582 return -ENOSYS; in ohci_queue_iso()
3589 &container_of(base, struct iso_context, base)->context; in ohci_flush_queue_iso()
3591 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); in ohci_flush_queue_iso()
3599 if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) { in ohci_flush_iso_completions()
3600 ohci_isoc_context_work(&base->work); in ohci_flush_iso_completions()
3602 switch (base->type) { in ohci_flush_iso_completions()
3605 if (ctx->header_length != 0) in ohci_flush_iso_completions()
3609 if (ctx->mc_completed != 0) in ohci_flush_iso_completions()
3613 ret = -ENOSYS; in ohci_flush_iso_completions()
3616 clear_bit_unlock(0, &ctx->flushing_completions); in ohci_flush_iso_completions()
3681 ar_context_release(&ohci->ar_response_ctx); in release_ohci()
3682 ar_context_release(&ohci->ar_request_ctx); in release_ohci()
3684 dev_notice(dev, "removed fw-ohci device\n"); in release_ohci()
3696 if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) { in pci_probe()
3697 dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n"); in pci_probe()
3698 return -ENOSYS; in pci_probe()
3703 return -ENOMEM; in pci_probe()
3704 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); in pci_probe()
3707 devres_add(&dev->dev, ohci); in pci_probe()
3711 dev_err(&dev->dev, "failed to enable OHCI hardware\n"); in pci_probe()
3718 spin_lock_init(&ohci->lock); in pci_probe()
3719 mutex_init(&ohci->phy_reg_mutex); in pci_probe()
3721 INIT_WORK(&ohci->bus_reset_work, bus_reset_work); in pci_probe()
3726 return -ENXIO; in pci_probe()
3732 return -ENXIO; in pci_probe()
3734 ohci->registers = pcim_iomap_table(dev)[0]; in pci_probe()
3737 if ((ohci_quirks[i].vendor == dev->vendor) && in pci_probe()
3739 ohci_quirks[i].device == dev->device) && in pci_probe()
3740 (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID || in pci_probe()
3741 ohci_quirks[i].revision >= dev->revision)) { in pci_probe()
3742 ohci->quirks = ohci_quirks[i].flags; in pci_probe()
3746 ohci->quirks = param_quirks; in pci_probe()
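/*
 * Editor's illustration of a quirk-table entry matched by the loop above
 * (the values here are made up): an entry applies when the vendor matches
 * exactly, the device matches or is PCI_ANY_ID, and the card's PCI revision
 * is at most the listed revision (or the listed revision is PCI_ANY_ID):
 *
 *	{ PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID, QUIRK_CYCLE_TIMER },
 */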
3749 ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ; in pci_probe()
3758 ohci->misc_buffer = dmam_alloc_coherent(&dev->dev, PAGE_SIZE, &ohci->misc_buffer_bus, in pci_probe()
3760 if (!ohci->misc_buffer) in pci_probe()
3761 return -ENOMEM; in pci_probe()
3763 err = ar_context_init(&ohci->ar_request_ctx, ohci, 0, in pci_probe()
3768 err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4, in pci_probe()
3773 err = context_init(&ohci->at_request_ctx, ohci, in pci_probe()
3778 err = context_init(&ohci->at_response_ctx, ohci, in pci_probe()
3784 ohci->ir_context_channels = ~0ULL; in pci_probe()
3785 ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); in pci_probe()
3787 ohci->ir_context_mask = ohci->ir_context_support; in pci_probe()
3788 ohci->n_ir = hweight32(ohci->ir_context_mask); in pci_probe()
3789 size = sizeof(struct iso_context) * ohci->n_ir; in pci_probe()
3790 ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL); in pci_probe()
3791 if (!ohci->ir_context_list) in pci_probe()
3792 return -ENOMEM; in pci_probe()
3795 ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); in pci_probe()
3797 if (!ohci->it_context_support) { in pci_probe()
3799 ohci->it_context_support = 0xf; in pci_probe()
3802 ohci->it_context_mask = ohci->it_context_support; in pci_probe()
3803 ohci->n_it = hweight32(ohci->it_context_mask); in pci_probe()
3804 size = sizeof(struct iso_context) * ohci->n_it; in pci_probe()
3805 ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL); in pci_probe()
3806 if (!ohci->it_context_list) in pci_probe()
3807 return -ENOMEM; in pci_probe()
3809 ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2; in pci_probe()
3810 ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2; in pci_probe()
3819 if (!(ohci->quirks & QUIRK_NO_MSI)) in pci_probe()
3838 err = fw_card_add(&ohci->card, max_receive, link_speed, guid, ohci->n_it + ohci->n_ir); in pci_probe()
3846 version >> 16, version & 0xff, ohci->card.index, in pci_probe()
3847 ohci->n_ir, ohci->n_it, ohci->quirks, in pci_probe()
3874 cancel_work_sync(&ohci->bus_reset_work); in pci_remove()
3875 fw_core_remove_card(&ohci->card); in pci_remove()
3889 dev_notice(&dev->dev, "removing fw-ohci device\n"); in pci_remove()
3929 reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid); in pci_resume()
3930 reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32)); in pci_resume()
3933 err = ohci_enable(&ohci->card, NULL, 0); in pci_resume()
3965 return -ENOMEM; in fw_ohci_init()
3983 /* Provide a module alias so root-on-sbp2 initrds don't break. */
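/*
 * Editor's note: the alias that comment introduces is, to the best of our
 * knowledge, the line immediately following in the driver:
 *
 *	MODULE_ALIAS("ohci1394");
 */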