Lines matching refs: ihost
179 static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
181 u32 get_value = ihost->completion_queue_get;
185 COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
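Note: the test at 185 is the standard cycle-bit validity check for a producer/consumer ring: the hardware flips a cycle bit each time it laps the queue, and an entry counts as new only while its cycle bit matches the one the software get pointer expects. A minimal standalone sketch of the convention, assuming the top bit of each 32-bit entry is the cycle bit (consistent with the 0x80000000 initializer at 792 below):

    #include <stdbool.h>
    #include <stdint.h>

    #define CQ_CYCLE_BIT 0x80000000u  /* assumed top-bit convention */

    /* Valid only while the entry's cycle bit matches the expected one;
     * stale entries from the previous lap fail this test automatically. */
    static bool cq_entry_valid(uint32_t expected_cycle, uint32_t entry)
    {
            return (entry & CQ_CYCLE_BIT) == expected_cycle;
    }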
191 static bool sci_controller_isr(struct isci_host *ihost)
193 if (sci_controller_completion_queue_has_entries(ihost))
200 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
208 spin_lock(&ihost->scic_lock);
209 if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
210 writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
211 writel(0, &ihost->smu_registers->interrupt_mask);
213 spin_unlock(&ihost->scic_lock);
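The mask-then-unmask pair at 210-211 is a deliberate re-arm: if another completion raced with the status-clear at 200, briefly masking and then unmasking guarantees a fresh interrupt message is latched. It runs under scic_lock and only while IHOST_IRQ_ENABLED, so it cannot fight a concurrent sci_controller_disable_interrupts(). The same sequence, annotated (names from the listing):

    spin_lock(&ihost->scic_lock);
    if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
            /* mask all completion sources ... */
            writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
            /* ... then unmask, so anything still pending re-raises the IRQ */
            writel(0, &ihost->smu_registers->interrupt_mask);
    }
    spin_unlock(&ihost->scic_lock);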
220 struct isci_host *ihost = data;
222 if (sci_controller_isr(ihost))
223 tasklet_schedule(&ihost->completion_tasklet);
228 static bool sci_controller_error_isr(struct isci_host *ihost)
233 readl(&ihost->smu_registers->interrupt_status);
249 writel(0xff, &ihost->smu_registers->interrupt_mask);
250 writel(0, &ihost->smu_registers->interrupt_mask);
255 static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
258 struct isci_request *ireq = ihost->reqs[index];
263 ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
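The compare at 263 defends against stale completions: the I/O tag packs a small generation ("seq") field above the task-context index, and a completion is honored only while its tag's seq still matches the per-index counter in io_request_sequence[]. A standalone sketch of the packing, assuming the 4-bit-seq over 12-bit-index split used by the driver's tag macros:

    #include <stdint.h>

    #define TAG_SEQ_SHIFT 12                       /* assumed: 4-bit seq, 12-bit tci */
    #define TAG_TCI_MASK  ((1u << TAG_SEQ_SHIFT) - 1)

    static uint16_t make_tag(uint8_t seq, uint16_t tci)
    {
            return (uint16_t)((seq << TAG_SEQ_SHIFT) | (tci & TAG_TCI_MASK));
    }

    static uint8_t  tag_seq(uint16_t tag) { return tag >> TAG_SEQ_SHIFT; }
    static uint16_t tag_tci(uint16_t tag) { return tag & TAG_TCI_MASK; }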
270 static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
281 ireq = ihost->reqs[index];
282 dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
291 idev = ihost->device_table[index];
292 dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
299 dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
305 static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
318 frame_header = ihost->uf_control.buffers.array[frame_index].header;
319 ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
326 sci_controller_release_frame(ihost, frame_index);
332 iphy = &ihost->phys[index];
344 iphy = &ihost->phys[index];
347 if (index < ihost->remote_node_entries)
348 idev = ihost->device_table[index];
355 sci_controller_release_frame(ihost, frame_index);
366 static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
378 dev_err(&ihost->pdev->dev,
382 ihost,
392 dev_err(&ihost->pdev->dev,
396 ihost,
401 ireq = ihost->reqs[index];
409 ireq = ihost->reqs[index];
413 dev_warn(&ihost->pdev->dev,
418 ihost,
424 idev = ihost->device_table[index];
428 dev_warn(&ihost->pdev->dev,
433 ihost,
450 iphy = &ihost->phys[index];
457 if (index < ihost->remote_node_entries) {
458 idev = ihost->device_table[index];
463 dev_err(&ihost->pdev->dev,
468 ihost,
475 dev_warn(&ihost->pdev->dev,
483 static void sci_controller_process_completions(struct isci_host *ihost)
492 dev_dbg(&ihost->pdev->dev,
495 ihost->completion_queue_get);
498 get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
499 get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
501 event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
502 event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
506 == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
510 ent = ihost->completion_queue[get_index];
517 dev_dbg(&ihost->pdev->dev,
524 sci_controller_task_completion(ihost, ent);
528 sci_controller_sdma_completion(ihost, ent);
532 sci_controller_unsolicited_frame(ihost, ent);
536 sci_controller_event_completion(ihost, ent);
544 sci_controller_event_completion(ihost, ent);
548 dev_warn(&ihost->pdev->dev,
559 ihost->completion_queue_get =
567 writel(ihost->completion_queue_get,
568 &ihost->smu_registers->completion_queue_get);
572 dev_dbg(&ihost->pdev->dev,
575 ihost->completion_queue_get);
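completion_queue_get is a single register-format word carrying both ring indices (I/O and event) and their cycle bits, which is what the NORMALIZE_* extractions at 498-502 pull apart. Advancing it means incrementing an index and flipping its cycle bit on wrap; an illustrative standalone sketch (not the driver's exact macros):

    #include <stdint.h>

    /* 'get' holds an index in its low bits plus a cycle bit elsewhere in
     * the same word. On wrap the index resets to 0 and the cycle flips,
     * so the pointer never matches entries left from the previous lap. */
    static uint32_t advance_get(uint32_t get, uint32_t index_mask,
                                uint32_t cycle_bit)
    {
            uint32_t index = (get & index_mask) + 1;

            if (index > index_mask)                   /* wrapped */
                    return (get & ~index_mask) ^ cycle_bit;
            return (get & ~index_mask) | index;
    }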
579 static void sci_controller_error_handler(struct isci_host *ihost)
584 readl(&ihost->smu_registers->interrupt_status);
587 sci_controller_completion_queue_has_entries(ihost)) {
589 sci_controller_process_completions(ihost);
590 writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
592 dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
595 sci_change_state(&ihost->sm, SCIC_FAILED);
603 writel(0, &ihost->smu_registers->interrupt_mask);
609 struct isci_host *ihost = data;
611 if (sci_controller_isr(ihost)) {
612 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
613 tasklet_schedule(&ihost->completion_tasklet);
615 } else if (sci_controller_error_isr(ihost)) {
616 spin_lock(&ihost->scic_lock);
617 sci_controller_error_handler(ihost);
618 spin_unlock(&ihost->scic_lock);
627 struct isci_host *ihost = data;
629 if (sci_controller_error_isr(ihost))
630 sci_controller_error_handler(ihost);
638 * @ihost: This parameter specifies the ISCI host object
643 static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
646 dev_info(&ihost->pdev->dev,
648 clear_bit(IHOST_START_PENDING, &ihost->flags);
649 wake_up(&ihost->eventq);
655 struct isci_host *ihost = ha->lldd_ha;
657 if (test_bit(IHOST_START_PENDING, &ihost->flags))
671 * @ihost: the handle to the controller object for which to return the
677 static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
680 if (!ihost)
702 static void sci_controller_enable_interrupts(struct isci_host *ihost)
704 set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
705 writel(0, &ihost->smu_registers->interrupt_mask);
708 void sci_controller_disable_interrupts(struct isci_host *ihost)
710 clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
711 writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
712 readl(&ihost->smu_registers->interrupt_mask); /* flush */
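The readl() at 712 is a posted-write flush: MMIO writes may be buffered on the way to the device, and reading any register back from the same device forces them to complete, so interrupts are guaranteed masked before this function returns. Annotated:

    writel(0xffffffff, &ihost->smu_registers->interrupt_mask); /* mask all */
    readl(&ihost->smu_registers->interrupt_mask); /* force the write to post */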
715 static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
720 readl(&ihost->scu_registers->peg0.ptsg.control);
725 &ihost->scu_registers->peg0.ptsg.control);
728 static void sci_controller_assign_task_entries(struct isci_host *ihost)
738 readl(&ihost->smu_registers->task_context_assignment[0]);
741 (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) |
745 &ihost->smu_registers->task_context_assignment[0]);
749 static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
756 ihost->completion_queue_get = 0;
763 &ihost->smu_registers->completion_queue_control);
775 &ihost->smu_registers->completion_queue_get);
784 &ihost->smu_registers->completion_queue_put);
792 ihost->completion_queue[index] = 0x80000000;
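Priming every slot with 0x80000000 at 792 pairs with completion_queue_get starting at 0 (756): the software's expected cycle begins at 0, so a slot whose cycle bit is still set can never test as valid until the hardware overwrites it. A two-line standalone check of that invariant:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t primed = 0x80000000u; /* freshly initialized slot */
            uint32_t expect = 0;           /* software cycle starts at 0 */

            assert((primed & 0x80000000u) != expect); /* slot reads as empty */
            return 0;
    }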
796 static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
807 &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
816 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
820 &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
823 void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
825 if (ihost->sm.current_state_id == SCIC_STARTING) {
830 sci_change_state(&ihost->sm, SCIC_READY);
832 isci_host_start_complete(ihost, status);
859 bool is_controller_start_complete(struct isci_host *ihost)
864 struct isci_phy *iphy = &ihost->phys[i];
871 if (is_port_config_apc(ihost))
885 (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
894 * @ihost: controller
900 static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
902 struct sci_oem_params *oem = &ihost->oem_parameters;
908 if (ihost->phy_startup_timer_pending)
911 if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
912 if (is_controller_start_complete(ihost)) {
913 sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
914 sci_del_timer(&ihost->phy_timer);
915 ihost->phy_startup_timer_pending = false;
918 iphy = &ihost->phys[ihost->next_phy_to_start];
922 ihost->next_phy_to_start++;
933 return sci_controller_start_next_phy(ihost);
940 sci_mod_timer(&ihost->phy_timer,
942 ihost->phy_startup_timer_pending = true;
944 dev_warn(&ihost->pdev->dev,
949 ihost->phys[ihost->next_phy_to_start].phy_index,
953 ihost->next_phy_to_start++;
962 struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
966 spin_lock_irqsave(&ihost->scic_lock, flags);
971 ihost->phy_startup_timer_pending = false;
974 status = sci_controller_start_next_phy(ihost);
978 spin_unlock_irqrestore(&ihost->scic_lock, flags);
981 static u16 isci_tci_active(struct isci_host *ihost)
983 return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
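CIRC_CNT here (and CIRC_SPACE at 2569) are the helpers from include/linux/circ_buf.h; with a power-of-two size they reduce to masked pointer arithmetic, which is why isci_tci_alloc/free further down mask head/tail with SCI_MAX_IO_REQUESTS-1 before indexing the pool. For reference:

    /* include/linux/circ_buf.h */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))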
986 static enum sci_status sci_controller_start(struct isci_host *ihost,
992 if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
993 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
994 __func__, ihost->sm.current_state_id);
999 BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
1000 ihost->tci_head = 0;
1001 ihost->tci_tail = 0;
1002 for (index = 0; index < ihost->task_context_entries; index++)
1003 isci_tci_free(ihost, index);
1006 sci_remote_node_table_initialize(&ihost->available_remote_nodes,
1007 ihost->remote_node_entries);
1013 sci_controller_disable_interrupts(ihost);
1016 sci_controller_enable_port_task_scheduler(ihost);
1018 /* Assign all the task entries to the ihost physical function */
1019 sci_controller_assign_task_entries(ihost);
1022 sci_controller_initialize_completion_queue(ihost);
1025 sci_controller_initialize_unsolicited_frame_queue(ihost);
1028 for (index = 0; index < ihost->logical_port_entries; index++) {
1029 struct isci_port *iport = &ihost->ports[index];
1036 sci_controller_start_next_phy(ihost);
1038 sci_mod_timer(&ihost->timer, timeout);
1040 sci_change_state(&ihost->sm, SCIC_STARTING);
1047 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
1048 unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
1050 set_bit(IHOST_START_PENDING, &ihost->flags);
1052 spin_lock_irq(&ihost->scic_lock);
1053 sci_controller_start(ihost, tmo);
1054 sci_controller_enable_interrupts(ihost);
1055 spin_unlock_irq(&ihost->scic_lock);
1058 static void isci_host_stop_complete(struct isci_host *ihost)
1060 sci_controller_disable_interrupts(ihost);
1061 clear_bit(IHOST_STOP_PENDING, &ihost->flags);
1062 wake_up(&ihost->eventq);
1065 static void sci_controller_completion_handler(struct isci_host *ihost)
1068 if (sci_controller_completion_queue_has_entries(ihost))
1069 sci_controller_process_completions(ihost);
1072 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
1074 writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
1075 writel(0, &ihost->smu_registers->interrupt_mask);
1078 void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
1084 dev_dbg(&ihost->pdev->dev,
1090 dev_dbg(&ihost->pdev->dev,
1101 wake_up_all(&ihost->eventq);
1104 isci_free_tag(ihost, ireq->io_tag);
1116 struct isci_host *ihost = (struct isci_host *)data;
1119 spin_lock_irq(&ihost->scic_lock);
1120 sci_controller_completion_handler(ihost);
1121 spin_unlock_irq(&ihost->scic_lock);
1127 active = isci_tci_active(ihost) - SCI_MAX_PORTS;
1135 &ihost->smu_registers->interrupt_coalesce_control);
1146 * @ihost: the handle to the controller object to stop.
1156 static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
1158 if (ihost->sm.current_state_id != SCIC_READY) {
1159 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1160 __func__, ihost->sm.current_state_id);
1164 sci_mod_timer(&ihost->timer, timeout);
1165 sci_change_state(&ihost->sm, SCIC_STOPPING);
1175 * @ihost: the handle to the controller object to reset.
1181 static enum sci_status sci_controller_reset(struct isci_host *ihost)
1183 switch (ihost->sm.current_state_id) {
1192 sci_change_state(&ihost->sm, SCIC_RESETTING);
1195 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1196 __func__, ihost->sm.current_state_id);
1201 static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
1210 phy_status = sci_phy_stop(&ihost->phys[index]);
1216 dev_warn(&ihost->pdev->dev,
1220 ihost->phys[index].phy_index, phy_status);
1230 * @ihost: host to take down
1239 void isci_host_deinit(struct isci_host *ihost)
1244 for (i = 0; i < isci_gpio_count(ihost); i++)
1245 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
1247 set_bit(IHOST_STOP_PENDING, &ihost->flags);
1249 spin_lock_irq(&ihost->scic_lock);
1250 sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
1251 spin_unlock_irq(&ihost->scic_lock);
1253 wait_for_stop(ihost);
1260 sci_controller_stop_phys(ihost);
1265 writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
1267 spin_lock_irq(&ihost->scic_lock);
1268 sci_controller_reset(ihost);
1269 spin_unlock_irq(&ihost->scic_lock);
1272 for (i = 0; i < ihost->logical_port_entries; i++) {
1273 struct isci_port *iport = &ihost->ports[i];
1279 struct isci_phy *iphy = &ihost->phys[i];
1283 timer_delete_sync(&ihost->port_agent.timer.timer);
1285 timer_delete_sync(&ihost->power_control.timer.timer);
1287 timer_delete_sync(&ihost->timer.timer);
1289 timer_delete_sync(&ihost->phy_timer.timer);
1310 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1312 sci_change_state(&ihost->sm, SCIC_RESET);
1317 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1319 sci_del_timer(&ihost->timer);
1332 * @ihost: This parameter represents the handle to the controller object
1347 sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
1432 &ihost->smu_registers->interrupt_coalesce_control);
1435 ihost->interrupt_coalesce_number = (u16)coalesce_number;
1436 ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
1444 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1448 val = readl(&ihost->smu_registers->clock_gating_control);
1453 writel(val, &ihost->smu_registers->clock_gating_control);
1456 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1461 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1464 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1467 static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
1473 for (index = 0; index < ihost->logical_port_entries; index++) {
1474 struct isci_port *iport = &ihost->ports[index];
1482 dev_warn(&ihost->pdev->dev,
1494 static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
1502 for (index = 0; index < ihost->remote_node_entries; index++) {
1503 if (ihost->device_table[index] != NULL) {
1505 device_status = sci_remote_device_stop(ihost->device_table[index], 0);
1509 dev_warn(&ihost->pdev->dev,
1514 ihost->device_table[index], device_status);
1524 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1526 sci_controller_stop_devices(ihost);
1527 sci_controller_stop_ports(ihost);
1529 if (!sci_controller_has_remote_devices_stopping(ihost))
1530 isci_host_stop_complete(ihost);
1535 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1537 sci_del_timer(&ihost->timer);
1540 static void sci_controller_reset_hardware(struct isci_host *ihost)
1543 sci_controller_disable_interrupts(ihost);
1546 writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
1552 writel(0x00000000, &ihost->smu_registers->completion_queue_get);
1555 writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
1558 writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
1563 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1565 sci_controller_reset_hardware(ihost);
1566 sci_change_state(&ihost->sm, SCIC_RESET);
1596 struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
1597 struct sci_base_state_machine *sm = &ihost->sm;
1600 spin_lock_irqsave(&ihost->scic_lock, flags);
1606 sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
1609 isci_host_stop_complete(ihost);
1611 dev_err(&ihost->pdev->dev,
1617 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1620 static enum sci_status sci_controller_construct(struct isci_host *ihost,
1626 sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
1628 ihost->scu_registers = scu_base;
1629 ihost->smu_registers = smu_base;
1631 sci_port_configuration_agent_construct(&ihost->port_agent);
1635 sci_port_construct(&ihost->ports[i], i, ihost);
1636 sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
1641 sci_phy_construct(&ihost->phys[i],
1642 &ihost->ports[SCI_MAX_PORTS], i);
1645 ihost->invalid_phy_mask = 0;
1647 sci_init_timer(&ihost->timer, controller_timeout);
1649 return sci_controller_reset(ihost);
1728 static u8 max_spin_up(struct isci_host *ihost)
1730 if (ihost->user_parameters.max_concurr_spinup)
1731 return min_t(u8, ihost->user_parameters.max_concurr_spinup,
1734 return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
1741 struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
1746 spin_lock_irqsave(&ihost->scic_lock, flags);
1751 ihost->power_control.phys_granted_power = 0;
1753 if (ihost->power_control.phys_waiting == 0) {
1754 ihost->power_control.timer_started = false;
1760 if (ihost->power_control.phys_waiting == 0)
1763 iphy = ihost->power_control.requesters[i];
1767 if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
1770 ihost->power_control.requesters[i] = NULL;
1771 ihost->power_control.phys_waiting--;
1772 ihost->power_control.phys_granted_power++;
1779 struct isci_phy *requester = ihost->power_control.requesters[j];
1792 ihost->power_control.requesters[j] = NULL;
1793 ihost->power_control.phys_waiting--;
1806 ihost->power_control.timer_started = true;
1809 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1812 void sci_controller_power_control_queue_insert(struct isci_host *ihost,
1817 if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
1818 ihost->power_control.phys_granted_power++;
1825 if (ihost->power_control.timer_started)
1826 sci_del_timer(&ihost->power_control.timer);
1828 sci_mod_timer(&ihost->power_control.timer,
1830 ihost->power_control.timer_started = true;
1842 current_phy = &ihost->phys[i];
1858 ihost->power_control.requesters[iphy->phy_index] = iphy;
1859 ihost->power_control.phys_waiting++;
1864 void sci_controller_power_control_queue_remove(struct isci_host *ihost,
1869 if (ihost->power_control.requesters[iphy->phy_index])
1870 ihost->power_control.phys_waiting--;
1872 ihost->power_control.requesters[iphy->phy_index] = NULL;
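The power-control code above rate-limits phy spin-up: a phy that cannot be granted power immediately parks itself in requesters[] at its own phy_index, and the periodic timer (1741 onward) drains the array, granting at most max_spin_up() phys per tick. A minimal standalone model of the bookkeeping, with the grant side effects and the SAS/SATA address checks elided:

    #define NPHYS 4                         /* assumed SCI_MAX_PHYS */

    struct phy;                             /* opaque for this sketch */

    static struct phy *requesters[NPHYS];
    static unsigned int phys_waiting, phys_granted;

    static void queue_insert(struct phy *p, unsigned int idx,
                             unsigned int max_spin_up)
    {
            if (phys_granted < max_spin_up) {
                    phys_granted++;         /* power up immediately */
            } else {
                    requesters[idx] = p;    /* park until the timer fires */
                    phys_waiting++;
            }
    }

    static void queue_remove(unsigned int idx)
    {
            if (requesters[idx])
                    phys_waiting--;
            requesters[idx] = NULL;
    }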
1893 static unsigned char *to_cable_select(struct isci_host *ihost)
1897 + ihost->id;
1899 return &ihost->oem_parameters.controller.cable_selection_mask;
1902 enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
1904 return decode_selection_byte(phy, *to_cable_select(ihost));
1921 static void sci_controller_afe_initialization(struct isci_host *ihost)
1923 struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
1924 const struct sci_oem_params *oem = &ihost->oem_parameters;
1925 struct pci_dev *pdev = ihost->pdev;
1928 unsigned char cable_selection_mask = *to_cable_select(ihost);
2122 static void sci_controller_initialize_power_control(struct isci_host *ihost)
2124 sci_init_timer(&ihost->power_control.timer, power_control_timeout);
2126 memset(ihost->power_control.requesters, 0,
2127 sizeof(ihost->power_control.requesters));
2129 ihost->power_control.phys_waiting = 0;
2130 ihost->power_control.phys_granted_power = 0;
2133 static enum sci_status sci_controller_initialize(struct isci_host *ihost)
2135 struct sci_base_state_machine *sm = &ihost->sm;
2139 if (ihost->sm.current_state_id != SCIC_RESET) {
2140 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2141 __func__, ihost->sm.current_state_id);
2147 sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
2149 ihost->next_phy_to_start = 0;
2150 ihost->phy_startup_timer_pending = false;
2152 sci_controller_initialize_power_control(ihost);
2159 sci_controller_afe_initialization(ihost);
2163 writel(0, &ihost->smu_registers->soft_reset_control);
2173 status = readl(&ihost->smu_registers->control_status);
2184 val = readl(&ihost->smu_registers->device_context_capacity);
2187 ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
2188 ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
2189 ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
2195 for (i = 0; i < ihost->logical_port_entries; i++) {
2197 *ptsg = &ihost->scu_registers->peg0.ptsg;
2203 val = readl(&ihost->scu_registers->sdma.pdma_configuration);
2205 writel(val, &ihost->scu_registers->sdma.pdma_configuration);
2207 val = readl(&ihost->scu_registers->sdma.cdma_configuration);
2209 writel(val, &ihost->scu_registers->sdma.cdma_configuration);
2216 result = sci_phy_initialize(&ihost->phys[i],
2217 &ihost->scu_registers->peg0.pe[i].tl,
2218 &ihost->scu_registers->peg0.pe[i].ll);
2223 for (i = 0; i < ihost->logical_port_entries; i++) {
2224 struct isci_port *iport = &ihost->ports[i];
2226 iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
2227 iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
2228 iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
2231 result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
2244 static int sci_controller_dma_alloc(struct isci_host *ihost)
2246 struct device *dev = &ihost->pdev->dev;
2251 if (ihost->completion_queue)
2255 ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
2257 if (!ihost->completion_queue)
2260 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
2261 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
2264 if (!ihost->remote_node_context_table)
2267 size = ihost->task_context_entries * sizeof(struct scu_task_context);
2268 ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
2270 if (!ihost->task_context_table)
2274 ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
2275 if (!ihost->ufi_buf)
2286 ireq->tc = &ihost->task_context_table[i];
2287 ireq->owning_controller = ihost;
2289 ireq->isci_host = ihost;
2290 ihost->reqs[i] = ireq;
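All of these buffers come from the device-managed dmam_* API, so they are freed automatically when the driver unbinds; that is why the listing shows no matching free calls, and why the early ihost->completion_queue check at 2251 is enough to make repeated calls harmless. The general shape, with generic names:

    /* lifetime is tied to struct device; no explicit free path is needed */
    buf = dmam_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;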
2296 static int sci_controller_mem_init(struct isci_host *ihost)
2298 int err = sci_controller_dma_alloc(ihost);
2303 writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
2304 writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
2306 writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
2307 writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
2309 writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
2310 writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
2312 sci_unsolicited_frame_control_construct(ihost);
2318 writel(lower_32_bits(ihost->uf_control.headers.physical_address),
2319 &ihost->scu_registers->sdma.uf_header_base_address_lower);
2320 writel(upper_32_bits(ihost->uf_control.headers.physical_address),
2321 &ihost->scu_registers->sdma.uf_header_base_address_upper);
2323 writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
2324 &ihost->scu_registers->sdma.uf_address_table_lower);
2325 writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
2326 &ihost->scu_registers->sdma.uf_address_table_upper);
2333 * @ihost: host to init
2339 int isci_host_init(struct isci_host *ihost)
2344 spin_lock_irq(&ihost->scic_lock);
2345 status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
2346 spin_unlock_irq(&ihost->scic_lock);
2348 dev_err(&ihost->pdev->dev,
2355 spin_lock_irq(&ihost->scic_lock);
2356 status = sci_controller_initialize(ihost);
2357 spin_unlock_irq(&ihost->scic_lock);
2359 dev_warn(&ihost->pdev->dev,
2366 err = sci_controller_mem_init(ihost);
2371 writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
2372 for (i = 0; i < isci_gpio_count(ihost); i++)
2373 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
2374 writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
2379 void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
2382 switch (ihost->sm.current_state_id) {
2384 sci_del_timer(&ihost->phy_timer);
2385 ihost->phy_startup_timer_pending = false;
2386 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2388 sci_controller_start_next_phy(ihost);
2391 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2395 dev_dbg(&ihost->pdev->dev,
2398 ihost->sm.current_state_id);
2402 void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
2405 switch (ihost->sm.current_state_id) {
2408 ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
2412 dev_dbg(&ihost->pdev->dev,
2417 ihost->sm.current_state_id);
2421 bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
2425 for (index = 0; index < ihost->remote_node_entries; index++) {
2426 if ((ihost->device_table[index] != NULL) &&
2427 (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
2434 void sci_controller_remote_device_stopped(struct isci_host *ihost,
2437 if (ihost->sm.current_state_id != SCIC_STOPPING) {
2438 dev_dbg(&ihost->pdev->dev,
2441 ihost, idev,
2442 ihost->sm.current_state_id);
2446 if (!sci_controller_has_remote_devices_stopping(ihost))
2447 isci_host_stop_complete(ihost);
2450 void sci_controller_post_request(struct isci_host *ihost, u32 request)
2452 dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
2453 __func__, ihost->id, request);
2455 writel(request, &ihost->smu_registers->post_context_port);
2458 struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
2465 if (task_index < ihost->task_context_entries) {
2466 struct isci_request *ireq = ihost->reqs[task_index];
2471 if (task_sequence == ihost->io_request_sequence[task_index])
2484 * @ihost: This is the controller object which contains the set of
2494 enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
2502 &ihost->available_remote_nodes, remote_node_count
2506 ihost->device_table[node_index] = idev;
2516 void sci_controller_free_remote_node_context(struct isci_host *ihost,
2522 if (ihost->device_table[node_id] == idev) {
2523 ihost->device_table[node_id] = NULL;
2526 &ihost->available_remote_nodes, remote_node_count, node_id
2543 void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
2545 if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
2546 writel(ihost->uf_control.get,
2547 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
2550 void isci_tci_free(struct isci_host *ihost, u16 tci)
2552 u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
2554 ihost->tci_pool[tail] = tci;
2555 ihost->tci_tail = tail + 1;
2558 static u16 isci_tci_alloc(struct isci_host *ihost)
2560 u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
2561 u16 tci = ihost->tci_pool[head];
2563 ihost->tci_head = head + 1;
2567 static u16 isci_tci_space(struct isci_host *ihost)
2569 return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
2572 u16 isci_alloc_tag(struct isci_host *ihost)
2574 if (isci_tci_space(ihost)) {
2575 u16 tci = isci_tci_alloc(ihost);
2576 u8 seq = ihost->io_request_sequence[tci];
2584 enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
2590 if (isci_tci_active(ihost) == 0)
2593 if (seq == ihost->io_request_sequence[tci]) {
2594 ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
2596 isci_tci_free(ihost, tci);
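Alloc and free cooperate through the per-TCI sequence counter: isci_alloc_tag (2572) stamps the current sequence into the tag, and isci_free_tag (2584) recycles the TCI only while the presented tag's sequence still matches, then bumps it modulo SCI_MAX_SEQ so stale copies of the old tag are rejected from then on (a classic ABA guard). A condensed standalone sketch with assumed sizes:

    #include <stdint.h>

    #define NTAGS 256                       /* assumed SCI_MAX_IO_REQUESTS */
    #define NSEQ   16                       /* assumed SCI_MAX_SEQ */

    static uint8_t seq_of[NTAGS];

    static uint16_t tag_alloc(uint16_t tci) /* tci comes from the pool */
    {
            return (uint16_t)((seq_of[tci] << 12) | tci);
    }

    static int tag_free(uint16_t tag)
    {
            uint16_t tci = tag & (NTAGS - 1);
            uint8_t  seq = tag >> 12;

            if (seq != seq_of[tci])
                    return -1;              /* stale tag: reject */
            seq_of[tci] = (seq + 1) & (NSEQ - 1); /* invalidate old copies */
            return 0;                       /* tci may now be reused */
    }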
2603 enum sci_status sci_controller_start_io(struct isci_host *ihost,
2609 if (ihost->sm.current_state_id != SCIC_READY) {
2610 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2611 __func__, ihost->sm.current_state_id);
2615 status = sci_remote_device_start_io(ihost, idev, ireq);
2620 sci_controller_post_request(ihost, ireq->post_context);
2624 enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
2634 if (ihost->sm.current_state_id != SCIC_READY) {
2635 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2636 __func__, ihost->sm.current_state_id);
2641 dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
2651 ihost, ireq->post_context |
2662 * @ihost: The handle to the controller object for which to complete the
2668 enum sci_status sci_controller_complete_io(struct isci_host *ihost,
2674 switch (ihost->sm.current_state_id) {
2679 status = sci_remote_device_complete_io(ihost, idev, ireq);
2686 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2687 __func__, ihost->sm.current_state_id);
2695 struct isci_host *ihost = ireq->owning_controller;
2697 if (ihost->sm.current_state_id != SCIC_READY) {
2698 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2699 __func__, ihost->sm.current_state_id);
2704 sci_controller_post_request(ihost, ireq->post_context);
2711 * @ihost: the handle to the controller object for which to start the task
2717 enum sci_status sci_controller_start_task(struct isci_host *ihost,
2723 if (ihost->sm.current_state_id != SCIC_READY) {
2724 dev_warn(&ihost->pdev->dev,
2731 status = sci_remote_device_start_task(ihost, idev, ireq);
2744 sci_controller_post_request(ihost, ireq->post_context);
2753 static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data)
2761 for (d = 0; d < isci_gpio_count(ihost); d++) {
2780 writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]);
2792 struct isci_host *ihost = sas_ha->lldd_ha;
2797 written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data);