/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "port.h"
#include "request.h"

#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
#define SCU_DUMMY_INDEX (0xFFFF)
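
/*
 * Note: the "dummy" remote node context and task context referenced
 * throughout this file (SCU_DUMMY_INDEX, sci_port_construct_dummy_rnc(),
 * sci_port_construct_dummy_task(), sci_port_post_dummy_request(), ...)
 * are per-port placeholders posted to the hardware to work around a
 * scheduler error in the silicon; see the comments on those routines below.
 */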

static void isci_port_change_state(struct isci_port *iport, enum isci_status status)
{
	unsigned long flags;

	dev_dbg(&iport->isci_host->pdev->dev,
		"%s: iport = %p, state = 0x%x\n",
		__func__, iport, status);

	/* XXX pointless lock */
	spin_lock_irqsave(&iport->state_lock, flags);
	iport->status = status;
	spin_unlock_irqrestore(&iport->state_lock, flags);
}

static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
{
	u8 index;

	proto->all = 0;
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		struct isci_phy *iphy = iport->phy_table[index];

		if (!iphy)
			continue;
		sci_phy_get_protocols(iphy, proto);
	}
}

static u32 sci_port_get_phys(struct isci_port *iport)
{
	u32 index;
	u32 mask;

	mask = 0;
	for (index = 0; index < SCI_MAX_PHYS; index++)
		if (iport->phy_table[index])
			mask |= (1 << index);

	return mask;
}

/**
 * sci_port_get_properties() - This method simply returns the properties
 *    regarding the port, such as: physical index, protocols, sas address, etc.
 * @port: this parameter specifies the port for which to retrieve the physical
 *    index.
 * @properties: This parameter specifies the properties structure into which to
 *    copy the requested information.
 *
 * Indicates whether the user specified a valid port. SCI_SUCCESS is returned
 * if the specified port was valid. SCI_FAILURE_INVALID_PORT is returned if
 * the specified port is not valid. When this value is returned, no data is
 * copied to the properties output parameter.
 */
enum sci_status sci_port_get_properties(struct isci_port *iport,
					struct sci_port_properties *prop)
{
	if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
		return SCI_FAILURE_INVALID_PORT;

	prop->index = iport->logical_port_index;
	prop->phy_mask = sci_port_get_phys(iport);
	sci_port_get_sas_address(iport, &prop->local.sas_address);
	sci_port_get_protocols(iport, &prop->local.protocols);
	sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);

	return SCI_SUCCESS;
}

static void sci_port_bcn_enable(struct isci_port *iport)
{
	struct isci_phy *iphy;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
		iphy = iport->phy_table[i];
		if (!iphy)
			continue;
		val = readl(&iphy->link_layer_registers->link_layer_control);
		/* clear the bit by writing 1. */
		writel(val, &iphy->link_layer_registers->link_layer_control);
	}
}

static void isci_port_bc_change_received(struct isci_host *ihost,
					 struct isci_port *iport,
					 struct isci_phy *iphy)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_phy = %p, sas_phy = %p\n",
		__func__, iphy, &iphy->sas_phy);

	ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD);
	sci_port_bcn_enable(iport);
}

static void isci_port_link_up(struct isci_host *isci_host,
			      struct isci_port *iport,
			      struct isci_phy *iphy)
{
	unsigned long flags;
	struct sci_port_properties properties;
	unsigned long success = true;

	BUG_ON(iphy->isci_port != NULL);

	iphy->isci_port = iport;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n",
		__func__, iport);

	spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);

	isci_port_change_state(iphy->isci_port, isci_starting);

	sci_port_get_properties(iport, &properties);

	if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
		u64 attached_sas_address;

		iphy->sas_phy.oob_mode = SATA_OOB_MODE;
		iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);

		/*
		 * For direct-attached SATA devices, the SCI core will
		 * automagically assign a SAS address to the end device
		 * for the purpose of creating a port. This SAS address
		 * will not be the same as assigned to the PHY and needs
		 * to be obtained from struct sci_port_properties properties.
		 */
		attached_sas_address = properties.remote.sas_address.high;
		attached_sas_address <<= 32;
		attached_sas_address |= properties.remote.sas_address.low;
		swab64s(&attached_sas_address);

		memcpy(&iphy->sas_phy.attached_sas_addr,
		       &attached_sas_address, sizeof(attached_sas_address));
	} else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
		iphy->sas_phy.oob_mode = SAS_OOB_MODE;
		iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);

		/* Copy the attached SAS address from the IAF */
		memcpy(iphy->sas_phy.attached_sas_addr,
		       iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
	} else {
		dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
		success = false;
	}

	iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);

	spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);

	/* Notify libsas that we have an address frame, if indeed
	 * we've found an SSP, SMP, or STP target */
	if (success)
		isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
						    PORTE_BYTES_DMAED);
}


/**
 * isci_port_link_down() - This function is called by the sci core when a link
 *    becomes inactive.
 * @isci_host: This parameter specifies the isci host object.
 * @phy: This parameter specifies the isci phy with the active link.
 * @port: This parameter specifies the isci port with the active link.
 *
 */
static void isci_port_link_down(struct isci_host *isci_host,
				struct isci_phy *isci_phy,
				struct isci_port *isci_port)
{
	struct isci_remote_device *isci_device;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n", __func__, isci_port);

	if (isci_port) {

		/* check to see if this is the last phy on this port. */
		if (isci_phy->sas_phy.port &&
		    isci_phy->sas_phy.port->num_phys == 1) {
			/* change the state for all devices on this port. The
			 * next task sent to this device will be returned as
			 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
			 * remove the target
			 */
			list_for_each_entry(isci_device,
					    &isci_port->remote_dev_list,
					    node) {
				dev_dbg(&isci_host->pdev->dev,
					"%s: isci_device = %p\n",
					__func__, isci_device);
				set_bit(IDEV_GONE, &isci_device->flags);
			}
			isci_port_change_state(isci_port, isci_stopping);
		}
	}

	/* Notify libsas of the broken link, this will trigger calls to our
	 * isci_port_deformed and isci_dev_gone functions.
	 */
	sas_phy_disconnected(&isci_phy->sas_phy);
	isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
					   PHYE_LOSS_OF_SIGNAL);

	isci_phy->isci_port = NULL;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p - Done\n", __func__, isci_port);
}


/**
 * isci_port_ready() - This function is called by the sci core when a link
 *    becomes ready.
 * @isci_host: This parameter specifies the isci host object.
 * @port: This parameter specifies the sci port with the active link.
 *
 */
static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port)
{
	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n", __func__, isci_port);

	complete_all(&isci_port->start_complete);
	isci_port_change_state(isci_port, isci_ready);
	return;
}

/**
 * isci_port_not_ready() - This function is called by the sci core when a link
 *    is not ready. All remote devices on this link will be removed if they are
 *    in the stopping state.
 * @isci_host: This parameter specifies the isci host object.
 * @port: This parameter specifies the sci port with the active link.
 *
 */
static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port)
{
	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n", __func__, isci_port);
}

static void isci_port_stop_complete(struct isci_host *ihost,
				    struct isci_port *iport,
				    enum sci_status completion_status)
{
	dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
}


static bool is_port_ready_state(enum sci_port_states state)
{
	switch (state) {
	case SCI_PORT_READY:
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
	case SCI_PORT_SUB_CONFIGURING:
		return true;
	default:
		return false;
	}
}

/* flag dummy rnc handling when exiting a ready state */
static void port_state_machine_change(struct isci_port *iport,
				      enum sci_port_states state)
{
	struct sci_base_state_machine *sm = &iport->sm;
	enum sci_port_states old_state = sm->current_state_id;

	if (is_port_ready_state(old_state) && !is_port_ready_state(state))
		iport->ready_exit = true;

	sci_change_state(sm, state);
	iport->ready_exit = false;
}

/**
 * isci_port_hard_reset_complete() - This function is called by the sci core
 *    when the hard reset complete notification has been received.
 * @port: This parameter specifies the sci port with the active link.
 * @completion_status: This parameter specifies the core status for the reset
 *    process.
 *
 */
static void isci_port_hard_reset_complete(struct isci_port *isci_port,
					  enum sci_status completion_status)
{
	dev_dbg(&isci_port->isci_host->pdev->dev,
		"%s: isci_port = %p, completion_status=%x\n",
		__func__, isci_port, completion_status);

	/* Save the status of the hard reset from the port. */
	isci_port->hard_reset_status = completion_status;

	if (completion_status != SCI_SUCCESS) {

		/* The reset failed. The port state is now SCI_PORT_FAILED. */
		if (isci_port->active_phy_mask == 0) {

			/* Generate the link down now to the host, since it
			 * was intercepted by the hard reset state machine when
			 * it really happened.
			 */
			isci_port_link_down(isci_port->isci_host,
					    &isci_port->isci_host->phys[
						isci_port->last_active_phy],
					    isci_port);
		}
		/* Advance the port state so that link state changes will be
		 * noticed.
		 */
		port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);

	}
	complete_all(&isci_port->hard_reset_complete);
}

/* This method will return a true value if the specified phy can be assigned
 * to this port. The following is a list of phys for each port that are
 * allowed:
 *	Port 0 - 3 2 1 0
 *	Port 1 - 1
 *	Port 2 - 3 2
 *	Port 3 - 3
 * This method doesn't preclude all configurations. It merely ensures that a
 * phy is part of the allowable set of phy identifiers for that port. For
 * example, one could assign phy 3 to port 0 and no other phys. Please refer
 * to sci_port_is_phy_mask_valid() for information regarding whether the
 * phy_mask for a port can be supported.
 *
 * Returns true if this is a valid phy assignment for the port, false if it
 * is not.
 */
bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
{
	struct isci_host *ihost = iport->owning_controller;
	struct sci_user_parameters *user = &ihost->user_parameters;

	/* Initialize to invalid value. */
	u32 existing_phy_index = SCI_MAX_PHYS;
	u32 index;

	if ((iport->physical_port_index == 1) && (phy_index != 1))
		return false;

	if (iport->physical_port_index == 3 && phy_index != 3)
		return false;

	if (iport->physical_port_index == 2 &&
	    (phy_index == 0 || phy_index == 1))
		return false;

	for (index = 0; index < SCI_MAX_PHYS; index++)
		if (iport->phy_table[index] && index != phy_index)
			existing_phy_index = index;

	/* Ensure that all of the phys in the port are capable of
	 * operating at the same maximum link rate.
	 */
	if (existing_phy_index < SCI_MAX_PHYS &&
	    user->phys[phy_index].max_speed_generation !=
	    user->phys[existing_phy_index].max_speed_generation)
		return false;

	return true;
}

/**
 * sci_port_is_phy_mask_valid() - Determine whether the port's phy mask can be
 *    supported.
 * @sci_port: This is the port object for which to determine if the phy mask
 *    can be supported.
 *
 * This method will return a true value if the port's phy mask can be
 * supported by the SCU. The following is a list of valid PHY mask
 * configurations for each port:
 *	Port 0 - [[3 2] 1] 0
 *	Port 1 - [1]
 *	Port 2 - [[3] 2]
 *	Port 3 - [3]
 *
 * Returns true if this is a valid phy mask for the port, false if it is not.
 */
static bool sci_port_is_phy_mask_valid(
	struct isci_port *iport,
	u32 phy_mask)
{
	if (iport->physical_port_index == 0) {
		if (((phy_mask & 0x0F) == 0x0F)
		    || ((phy_mask & 0x03) == 0x03)
		    || ((phy_mask & 0x01) == 0x01)
		    || (phy_mask == 0))
			return true;
	} else if (iport->physical_port_index == 1) {
		if (((phy_mask & 0x02) == 0x02)
		    || (phy_mask == 0))
			return true;
	} else if (iport->physical_port_index == 2) {
		if (((phy_mask & 0x0C) == 0x0C)
		    || ((phy_mask & 0x04) == 0x04)
		    || (phy_mask == 0))
			return true;
	} else if (iport->physical_port_index == 3) {
		if (((phy_mask & 0x08) == 0x08)
		    || (phy_mask == 0))
			return true;
	}

	return false;
}

/*
 * This method retrieves a currently active (i.e. connected) phy contained in
 * the port. Currently, the lowest order phy that is connected is returned.
 * This method returns a pointer to a struct isci_phy object. NULL is returned
 * if there are no currently active (i.e. connected to a remote end point)
 * phys contained in the port. All other values specify a struct isci_phy
 * object that is active in the port.
 */
static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
{
	u32 index;
	struct isci_phy *iphy;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		/* Ensure that the phy is both part of the port and currently
		 * connected to the remote end-point.
		 */
		iphy = iport->phy_table[index];
		if (iphy && sci_port_active_phy(iport, iphy))
			return iphy;
	}

	return NULL;
}

static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
{
	/* Check to see if we can add this phy to the port: the phy must not
	 * already be part of a port, and the port must not already have a
	 * phy assigned at this phy index.
	 */
	if (!iport->phy_table[iphy->phy_index] &&
	    !phy_get_non_dummy_port(iphy) &&
	    sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
		/* Phy is being added in the stopped state so we are in MPC mode
		 * make logical port index = physical port index
		 */
		iport->logical_port_index = iport->physical_port_index;
		iport->phy_table[iphy->phy_index] = iphy;
		sci_phy_set_port(iphy, iport);

		return SCI_SUCCESS;
	}

	return SCI_FAILURE;
}

static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
{
	/* Make sure that this phy is part of this port */
	if (iport->phy_table[iphy->phy_index] == iphy &&
	    phy_get_non_dummy_port(iphy) == iport) {
		struct isci_host *ihost = iport->owning_controller;

		/* Yep it is assigned to this port so remove it */
		sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
		iport->phy_table[iphy->phy_index] = NULL;
		return SCI_SUCCESS;
	}

	return SCI_FAILURE;
}

void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
{
	u32 index;

	sas->high = 0;
	sas->low = 0;
	for (index = 0; index < SCI_MAX_PHYS; index++)
		if (iport->phy_table[index])
			sci_phy_get_sas_address(iport->phy_table[index], sas);
}

void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
{
	struct isci_phy *iphy;

	/*
	 * Ensure that the phy is both part of the port and currently
	 * connected to the remote end-point.
	 */
	iphy = sci_port_get_a_connected_phy(iport);
	if (iphy) {
		if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
			sci_phy_get_attached_sas_address(iphy, sas);
		} else {
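			/* A direct-attached SATA device sends no identify
			 * address frame, so the core fabricates an attached
			 * SAS address from the local phy's address (see
			 * isci_port_link_up()), offset by the phy index so
			 * each direct-attached device remains unique.
			 */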
			sci_phy_get_sas_address(iphy, sas);
			sas->low += iphy->phy_index;
		}
	} else {
		sas->high = 0;
		sas->low = 0;
	}
}

/**
 * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
 *
 * @sci_port: logical port on which we need to create the remote node context
 * @rni: remote node index for this remote node context.
 *
 * This routine will construct a dummy remote node context data structure.
 * This structure will be posted to the hardware to work around a scheduler
 * error in the hardware.
 */
static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
{
	union scu_remote_node_context *rnc;

	rnc = &iport->owning_controller->remote_node_context_table[rni];

	memset(rnc, 0, sizeof(union scu_remote_node_context));

	rnc->ssp.remote_sas_address_hi = 0;
	rnc->ssp.remote_sas_address_lo = 0;

	rnc->ssp.remote_node_index = rni;
	rnc->ssp.remote_node_port_width = 1;
	rnc->ssp.logical_port_index = iport->physical_port_index;

	rnc->ssp.nexus_loss_timer_enable = false;
	rnc->ssp.check_bit = false;
	rnc->ssp.is_valid = true;
	rnc->ssp.is_remote_node_context = true;
	rnc->ssp.function_number = 0;
	rnc->ssp.arbitration_wait_time = 0;
}

/*
 * construct a dummy task context data structure. This
 * structure will be posted to the hardware to work around a scheduler error
 * in the hardware.
 */
static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
{
	struct isci_host *ihost = iport->owning_controller;
	struct scu_task_context *task_context;

	task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	memset(task_context, 0, sizeof(struct scu_task_context));

	task_context->initiator_request = 1;
	task_context->connection_rate = 1;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->task_index = ISCI_TAG_TCI(tag);
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
	task_context->remote_node_index = iport->reserved_rni;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->task_phase = 0x01;
}

static void sci_port_destroy_dummy_resources(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;

	if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
		isci_free_tag(ihost, iport->reserved_tag);

	if (iport->reserved_rni != SCU_DUMMY_INDEX)
		sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
								1, iport->reserved_rni);

	iport->reserved_rni = SCU_DUMMY_INDEX;
	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
}

void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
{
	u8 index;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		if (iport->active_phy_mask & (1 << index))
			sci_phy_setup_transport(iport->phy_table[index], device_id);
	}
}

static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
{
	sci_phy_resume(iphy);
	iport->enabled_phy_mask |= 1 << iphy->phy_index;
}

static void sci_port_activate_phy(struct isci_port *iport,
				  struct isci_phy *iphy,
				  u8 flags)
{
	struct isci_host *ihost = iport->owning_controller;

	if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
		sci_phy_resume(iphy);

	iport->active_phy_mask |= 1 << iphy->phy_index;

	sci_controller_clear_invalid_phy(ihost, iphy);

	if (flags & PF_NOTIFY)
		isci_port_link_up(ihost, iport, iphy);
}

void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
			     bool do_notify_user)
{
	struct isci_host *ihost = iport->owning_controller;

	iport->active_phy_mask &= ~(1 << iphy->phy_index);
	iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
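	/* Remember the last phy to go down so that a deferred link-down can
	 * be generated for it if a subsequent hard reset fails (see
	 * isci_port_hard_reset_complete()).
	 */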
	if (!iport->active_phy_mask)
		iport->last_active_phy = iphy->phy_index;

	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;

	/* Re-assign the phy back to the LP as if it were a narrow port for APC
	 * mode. For MPC mode, the phy will remain in the port.
	 */
	if (iport->owning_controller->oem_parameters.controller.mode_type ==
	    SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
		writel(iphy->phy_index,
		       &iport->port_pe_configuration_register[iphy->phy_index]);

	if (do_notify_user == true)
		isci_port_link_down(ihost, iphy, iport);
}

static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
{
	struct isci_host *ihost = iport->owning_controller;

	/*
	 * Check to see if we have already reported this link as bad and if
	 * not go ahead and tell the SCI_USER that we have discovered an
	 * invalid link.
	 */
	if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
		ihost->invalid_phy_mask |= 1 << iphy->phy_index;
		dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
	}
}

/**
 * sci_port_general_link_up_handler - phy can be assigned to port?
 * @sci_port: sci_port object which has a phy that has gone link up.
 * @sci_phy: This is the struct isci_phy object that has gone link up.
 * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
 *
 * Determine if this phy can be assigned to this port. If the phy is
 * not a valid PHY for this port then the function will notify the user.
 * A PHY can only be part of a port if its attached SAS address is the
 * same as all other PHYs in the same port.
 */
static void sci_port_general_link_up_handler(struct isci_port *iport,
					     struct isci_phy *iphy,
					     u8 flags)
{
	struct sci_sas_address port_sas_address;
	struct sci_sas_address phy_sas_address;

	sci_port_get_attached_sas_address(iport, &port_sas_address);
	sci_phy_get_attached_sas_address(iphy, &phy_sas_address);

	/* If the SAS address of the new phy matches the SAS address of
	 * other phys in the port OR this is the first phy in the port,
	 * then activate the phy and allow it to be used for operations
	 * in this port.
	 */
	if ((phy_sas_address.high == port_sas_address.high &&
	     phy_sas_address.low == port_sas_address.low) ||
	    iport->active_phy_mask == 0) {
		struct sci_base_state_machine *sm = &iport->sm;

		sci_port_activate_phy(iport, iphy, flags);
		if (sm->current_state_id == SCI_PORT_RESETTING)
			port_state_machine_change(iport, SCI_PORT_READY);
	} else
		sci_port_invalid_link_up(iport, iphy);
}



/**
 * sci_port_is_wide() - This method indicates whether the port is a wide port.
 * @sci_port: The port for which the wide port condition is to be checked.
 *
 * Returns false if the port only has a single phy object assigned. If there
 * are no phys, or more than one phy, then the method returns true (the port
 * is treated as a wide port).
 */
static bool sci_port_is_wide(struct isci_port *iport)
{
	u32 index;
	u32 phy_count = 0;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		if (iport->phy_table[index] != NULL) {
			phy_count++;
		}
	}

	return phy_count != 1;
}

/**
 * sci_port_link_detected() - This method is called by the PHY object when the
 *    link is detected. If the port wants the PHY to continue on to the link
 *    up state then the port layer must return true. If the port object
 *    returns false the phy object must halt its attempt to go link up.
 * @sci_port: The port associated with the phy object.
 * @sci_phy: The phy object that is trying to go link up.
 *
 * Returns true if the phy object can continue to the link up condition, false
 * if it cannot continue on to the ready state. This notification is in place
 * for wide ports and direct attached phys. Since there are no wide ported
 * SATA devices this could become an invalid port configuration.
 */
bool sci_port_link_detected(
	struct isci_port *iport,
	struct isci_phy *iphy)
{
	if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
	    (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
		if (sci_port_is_wide(iport)) {
			sci_port_invalid_link_up(iport, iphy);
			return false;
		} else {
			struct isci_host *ihost = iport->owning_controller;
			struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
			writel(iphy->phy_index,
			       &dst_port->port_pe_configuration_register[iphy->phy_index]);
		}
	}

	return true;
}

static void port_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
	struct isci_host *ihost = iport->owning_controller;
	unsigned long flags;
	u32 current_state;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	current_state = iport->sm.current_state_id;

	if (current_state == SCI_PORT_RESETTING) {
		/* if the port is still in the resetting state then the timeout
		 * fired before the reset completed.
		 */
		port_state_machine_change(iport, SCI_PORT_FAILED);
	} else if (current_state == SCI_PORT_STOPPED) {
		/* if the port is stopped then the start request failed. In this
		 * case stay in the stopped state.
		 */
		dev_err(sciport_to_dev(iport),
			"%s: SCIC Port 0x%p failed to stop before timeout.\n",
			__func__,
			iport);
	} else if (current_state == SCI_PORT_STOPPING) {
		/* if the port is still stopping then the stop has not completed */
		isci_port_stop_complete(iport->owning_controller,
					iport,
					SCI_FAILURE_TIMEOUT);
	} else {
		/* The port is in the ready state and we have a timer
		 * reporting a timeout; this should not happen.
		 */
		dev_err(sciport_to_dev(iport),
			"%s: SCIC Port 0x%p is processing a timeout operation "
			"in state %d.\n", __func__, iport, current_state);
	}

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

/* --------------------------------------------------------------------------- */

/**
 * sci_port_update_viit_entry() - This function updates the hardware's VIIT
 *    entry for this port.
 *
 */
static void sci_port_update_viit_entry(struct isci_port *iport)
{
	struct sci_sas_address sas_address;

	sci_port_get_sas_address(iport, &sas_address);

	writel(sas_address.high,
	       &iport->viit_registers->initiator_sas_address_hi);
	writel(sas_address.low,
	       &iport->viit_registers->initiator_sas_address_lo);

	/* This value gets cleared just in case it's not already cleared */
	writel(0, &iport->viit_registers->reserved);

	/* We are required to update the status register last */
	writel(SCU_VIIT_ENTRY_ID_VIIT |
	       SCU_VIIT_IPPT_INITIATOR |
	       ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
	       SCU_VIIT_STATUS_ALL_VALID,
	       &iport->viit_registers->status);
}

enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
{
	u16 index;
	struct isci_phy *iphy;
	enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;

	/*
	 * Loop through all of the phys in this port and find the phy with the
	 * lowest maximum link rate. */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		iphy = iport->phy_table[index];
		if (iphy && sci_port_active_phy(iport, iphy) &&
		    iphy->max_negotiated_speed < max_allowed_speed)
			max_allowed_speed = iphy->max_negotiated_speed;
	}

	return max_allowed_speed;
}

static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

/**
 * sci_port_post_dummy_request() - post dummy/workaround request
 * @sci_port: port to post task
 *
 * Prevent the hardware scheduler from posting new requests to the front
 * of the scheduler queue causing a starvation problem for currently
 * ongoing requests.
 *
 */
static void sci_port_post_dummy_request(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u16 tag = iport->reserved_tag;
	struct scu_task_context *tc;
	u32 command;

	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	tc->abort = 0;

	command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
		  ISCI_TAG_TCI(tag);

	sci_controller_post_request(ihost, command);
}

/**
 * This routine will abort the dummy request. This will allow the hardware to
 * power down parts of the silicon to save power.
 *
 * @sci_port: The port on which the task must be aborted.
 *
 */
static void sci_port_abort_dummy_request(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u16 tag = iport->reserved_tag;
	struct scu_task_context *tc;
	u32 command;

	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	tc->abort = 1;

	command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
		  ISCI_TAG_TCI(tag);

	sci_controller_post_request(ihost, command);
}

/**
 *
 * @sci_port: This is the struct isci_port object to resume.
 *
 * This method will resume the port task scheduler for this port object.
 */
static void
sci_port_resume_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_port_suspend_port_task_scheduler(iport);

	iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;

	if (iport->active_phy_mask != 0) {
		/* At least one of the phys on the port is ready */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_OPERATIONAL);
	}
}

static void scic_sds_port_ready_substate_waiting_exit(
	struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	sci_port_resume_port_task_scheduler(iport);
}

static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
{
	u32 index;
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	isci_port_ready(ihost, iport);

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		if (iport->phy_table[index]) {
			writel(iport->physical_port_index,
			       &iport->port_pe_configuration_register[
					iport->phy_table[index]->phy_index]);
			/* resume any phy whose enabled state does not yet
			 * match its active state
			 */
			if (((iport->active_phy_mask ^ iport->enabled_phy_mask) & (1 << index)) != 0)
				sci_port_resume_phy(iport, iport->phy_table[index]);
		}
	}

	sci_port_update_viit_entry(iport);

	/*
	 * Post the dummy task for the port so the hardware can schedule
	 * io correctly
	 */
	sci_port_post_dummy_request(iport);
}

static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u8 phys_index = iport->physical_port_index;
	union scu_remote_node_context *rnc;
	u16 rni = iport->reserved_rni;
	u32 command;

	rnc = &ihost->remote_node_context_table[rni];

	rnc->ssp.is_valid = false;

	/* ensure the preceding tc abort request has reached the
	 * controller and give it ample time to act before posting the rnc
	 * invalidate
	 */
	readl(&ihost->smu_registers->interrupt_status);	/* flush */
	udelay(10);

	command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);
}

/**
 *
 * @object: This is the object which is cast to a struct isci_port object.
 *
 * This method will perform the actions required by the struct isci_port on
 * exiting the SCI_PORT_SUB_OPERATIONAL state. This function reports the port
 * not ready and suspends the port task scheduler.
 */
static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	/*
	 * Kill the dummy task for this port; if it has not yet posted,
	 * the hardware will treat this as a NOP and just return abort
	 * complete.
	 */
	sci_port_abort_dummy_request(iport);

	isci_port_not_ready(ihost, iport);

	if (iport->ready_exit)
		sci_port_invalidate_dummy_remote_node(iport);
}

static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	if (iport->active_phy_mask == 0) {
		isci_port_not_ready(ihost, iport);

		port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
	} else
		port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
}

enum sci_status sci_port_start(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	enum sci_status status = SCI_SUCCESS;
	enum sci_port_states state;
	u32 phy_mask;

	state = iport->sm.current_state_id;
	if (state != SCI_PORT_STOPPED) {
		dev_warn(sciport_to_dev(iport),
			 "%s: in wrong state: %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	if (iport->assigned_device_count > 0) {
		/* TODO This is a start failure operation because
		 * there are still devices assigned to this port.
		 * There must be no devices assigned to a port on a
		 * start operation.
		 */
		return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
	}

	if (iport->reserved_rni == SCU_DUMMY_INDEX) {
		u16 rni = sci_remote_node_table_allocate_remote_node(
				&ihost->available_remote_nodes, 1);

		if (rni != SCU_DUMMY_INDEX)
			sci_port_construct_dummy_rnc(iport, rni);
		else
			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
		iport->reserved_rni = rni;
	}

	if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		u16 tag;

		tag = isci_alloc_tag(ihost);
		if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
		else
			sci_port_construct_dummy_task(iport, tag);
		iport->reserved_tag = tag;
	}

	if (status == SCI_SUCCESS) {
		phy_mask = sci_port_get_phys(iport);

		/*
		 * There are one or more phys assigned to this port. Make sure
		 * the port's phy mask is in fact legal and supported by the
		 * silicon.
		 */
		if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
			port_state_machine_change(iport,
						  SCI_PORT_READY);

			return SCI_SUCCESS;
		}
		status = SCI_FAILURE;
	}

	if (status != SCI_SUCCESS)
		sci_port_destroy_dummy_resources(iport);

	return status;
}

enum sci_status sci_port_stop(struct isci_port *iport)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED:
		return SCI_SUCCESS;
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
	case SCI_PORT_SUB_CONFIGURING:
	case SCI_PORT_RESETTING:
		port_state_machine_change(iport,
					  SCI_PORT_STOPPING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport),
			 "%s: in wrong state: %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}
}

static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
{
	enum sci_status status = SCI_FAILURE_INVALID_PHY;
	struct isci_phy *iphy = NULL;
	enum sci_port_states state;
	u32 phy_index;

	state = iport->sm.current_state_id;
	if (state != SCI_PORT_SUB_OPERATIONAL) {
		dev_warn(sciport_to_dev(iport),
			 "%s: in wrong state: %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Select a phy on which we can send the hard reset request. */
	for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
		iphy = iport->phy_table[phy_index];
		if (iphy && !sci_port_active_phy(iport, iphy)) {
			/*
			 * We found a phy but it is not ready; select a
			 * different phy.
			 */
			iphy = NULL;
		}
	}

	/* If we have a phy then go ahead and start the reset procedure */
	if (!iphy)
		return status;
	status = sci_phy_reset(iphy);

	if (status != SCI_SUCCESS)
		return status;

	sci_mod_timer(&iport->timer, timeout);
	iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;

	port_state_machine_change(iport, SCI_PORT_RESETTING);
	return SCI_SUCCESS;
}

/**
 * sci_port_add_phy() -
 * @sci_port: This parameter specifies the port in which the phy will be added.
 * @sci_phy: This parameter is the phy which is to be added to the port.
 *
 * This method will add a PHY to the selected port. This method returns an
 * enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other
 * status is a failure to add the phy to the port.
 */
enum sci_status sci_port_add_phy(struct isci_port *iport,
				 struct isci_phy *iphy)
{
	enum sci_status status;
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED: {
		struct sci_sas_address port_sas_address;

		/* Read the port assigned SAS Address if there is one */
		sci_port_get_sas_address(iport, &port_sas_address);

		if (port_sas_address.high != 0 && port_sas_address.low != 0) {
			struct sci_sas_address phy_sas_address;

			/* Make sure that the PHY SAS Address matches the SAS Address
			 * for this port
			 */
			sci_phy_get_sas_address(iphy, &phy_sas_address);

			if (port_sas_address.high != phy_sas_address.high ||
			    port_sas_address.low != phy_sas_address.low)
				return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
		}
		return sci_port_set_phy(iport, iphy);
	}
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;

		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
		port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);

		return status;
	case SCI_PORT_SUB_CONFIGURING:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;
		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);

		/* Re-enter the configuring state since this may be the last phy in
		 * the port.
		 */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport),
			 "%s: in wrong state: %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}
}

/**
 * sci_port_remove_phy() -
 * @sci_port: This parameter specifies the port from which the phy will be
 *    removed.
 * @sci_phy: This parameter is the phy which is to be removed from the port.
 *
 * This method will remove the PHY from the selected PORT. This method returns
 * an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any
 * other status is a failure to remove the phy from the port.
 */
enum sci_status sci_port_remove_phy(struct isci_port *iport,
				    struct isci_phy *iphy)
{
	enum sci_status status;
	enum sci_port_states state;

	state = iport->sm.current_state_id;

	switch (state) {
	case SCI_PORT_STOPPED:
		return sci_port_clear_phy(iport, iphy);
	case SCI_PORT_SUB_OPERATIONAL:
		status = sci_port_clear_phy(iport, iphy);
		if (status != SCI_SUCCESS)
			return status;

		sci_port_deactivate_phy(iport, iphy, true);
		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	case SCI_PORT_SUB_CONFIGURING:
		status = sci_port_clear_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;
		sci_port_deactivate_phy(iport, iphy, true);

		/* Re-enter the configuring state since this may be the last phy in
		 * the port
		 */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport),
			 "%s: in wrong state: %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_port_link_up(struct isci_port *iport,
				 struct isci_phy *iphy)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_WAITING:
		/* Since this is the first phy going link up for the port we
		 * can just enable it and continue
		 */
		sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);

		port_state_machine_change(iport,
					  SCI_PORT_SUB_OPERATIONAL);
		return SCI_SUCCESS;
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
		return SCI_SUCCESS;
	case SCI_PORT_RESETTING:
		/* TODO We should make sure that the phy that has gone
		 * link up is the same one on which we sent the reset. It is
		 * possible that the phy on which we sent the reset is not the
		 * one that has gone link up and we want to make sure that
		 * phy being reset comes back. Consider the case where a
		 * reset is sent but before the hardware processes the reset it
		 * gets a link up on the port because of a hot plug event.
		 * Because of the reset request this phy will go link down
		 * almost immediately.
		 */

		/* In the resetting state we don't notify the user regarding
		 * link up and link down notifications.
		 */
		sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport),
			 "%s: in wrong state: %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_port_link_down(struct isci_port *iport,
				   struct isci_phy *iphy)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_deactivate_phy(iport, iphy, true);

		/* If there are no active phys left in the port, then
		 * transition the port to the WAITING state until such time
		 * as a phy goes link up
		 */
		if (iport->active_phy_mask == 0)
			port_state_machine_change(iport,
						  SCI_PORT_SUB_WAITING);
		return SCI_SUCCESS;
	case SCI_PORT_RESETTING:
		/* In the resetting state we don't notify the user regarding
		 * link up and link down notifications. */
		sci_port_deactivate_phy(iport, iphy, false);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport),
			 "%s: in wrong state: %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_port_start_io(struct isci_port *iport,
				  struct isci_remote_device *idev,
				  struct isci_request *ireq)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_WAITING:
		return SCI_FAILURE_INVALID_STATE;
	case SCI_PORT_SUB_OPERATIONAL:
		iport->started_request_count++;
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport),
			 "%s: in wrong state: %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_port_complete_io(struct isci_port *iport,
				     struct isci_remote_device *idev,
				     struct isci_request *ireq)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED:
		dev_warn(sciport_to_dev(iport),
			 "%s: in wrong state: %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_PORT_STOPPING:
		sci_port_decrement_request_count(iport);

		if (iport->started_request_count == 0)
			port_state_machine_change(iport,
						  SCI_PORT_STOPPED);
		break;
	case SCI_PORT_READY:
	case SCI_PORT_RESETTING:
	case SCI_PORT_FAILED:
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_decrement_request_count(iport);
		break;
	case SCI_PORT_SUB_CONFIGURING:
		sci_port_decrement_request_count(iport);
		if (iport->started_request_count == 0) {
			port_state_machine_change(iport,
						  SCI_PORT_SUB_OPERATIONAL);
		}
		break;
	}
	return SCI_SUCCESS;
}

static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	/* enable the port task scheduler in a suspended state */
	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value &=
		~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

static void sci_port_post_dummy_remote_node(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u8 phys_index = iport->physical_port_index;
	union scu_remote_node_context *rnc;
	u16 rni = iport->reserved_rni;
	u32 command;

	rnc = &ihost->remote_node_context_table[rni];
	rnc->ssp.is_valid = true;

	command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);

	/* ensure hardware has seen the post rnc command and give it
	 * ample time to act before sending the suspend
	 */
	readl(&ihost->smu_registers->interrupt_status);	/* flush */
	udelay(10);

	command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);
}

static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
		/*
		 * If we enter this state because of a request to stop
		 * the port then we want to disable the hardware's port
		 * task scheduler. */
		sci_port_disable_port_task_scheduler(iport);
	}
}

static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	/* Enable and suspend the port task scheduler */
	sci_port_enable_port_task_scheduler(iport);
}

static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;
	u32 prev_state;

	prev_state = iport->sm.previous_state_id;
	if (prev_state == SCI_PORT_RESETTING)
		isci_port_hard_reset_complete(iport, SCI_SUCCESS);
	else
		isci_port_not_ready(ihost, iport);

	/* Post and suspend the dummy remote node context for this port. */
	sci_port_post_dummy_remote_node(iport);

	/* Start the ready substate machine */
	port_state_machine_change(iport,
				  SCI_PORT_SUB_WAITING);
}

static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_del_timer(&iport->timer);
}

static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_del_timer(&iport->timer);

	sci_port_destroy_dummy_resources(iport);
}

static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
}

/* --------------------------------------------------------------------------- */

static const struct sci_base_state sci_port_state_table[] = {
	[SCI_PORT_STOPPED] = {
		.enter_state = sci_port_stopped_state_enter,
		.exit_state = sci_port_stopped_state_exit
	},
	[SCI_PORT_STOPPING] = {
		.exit_state = sci_port_stopping_state_exit
	},
	[SCI_PORT_READY] = {
		.enter_state = sci_port_ready_state_enter,
	},
	[SCI_PORT_SUB_WAITING] = {
		.enter_state = sci_port_ready_substate_waiting_enter,
		.exit_state = scic_sds_port_ready_substate_waiting_exit,
	},
	[SCI_PORT_SUB_OPERATIONAL] = {
		.enter_state = sci_port_ready_substate_operational_enter,
		.exit_state = sci_port_ready_substate_operational_exit
	},
	[SCI_PORT_SUB_CONFIGURING] = {
		.enter_state = sci_port_ready_substate_configuring_enter
	},
	[SCI_PORT_RESETTING] = {
		.exit_state = sci_port_resetting_state_exit
	},
	[SCI_PORT_FAILED] = {
		.enter_state = sci_port_failed_state_enter,
	}
};

void sci_port_construct(struct isci_port *iport, u8 index,
			struct isci_host *ihost)
{
	sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);

	iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
	iport->physical_port_index = index;
	iport->active_phy_mask = 0;
	iport->enabled_phy_mask = 0;
	iport->last_active_phy = 0;
	iport->ready_exit = false;

	iport->owning_controller = ihost;

	iport->started_request_count = 0;
	iport->assigned_device_count = 0;

	iport->reserved_rni = SCU_DUMMY_INDEX;
	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;

	sci_init_timer(&iport->timer, port_timeout);

	iport->port_task_scheduler_registers = NULL;

	for (index = 0; index < SCI_MAX_PHYS; index++)
		iport->phy_table[index] = NULL;
}

void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
{
	INIT_LIST_HEAD(&iport->remote_dev_list);
	INIT_LIST_HEAD(&iport->domain_dev_list);
	spin_lock_init(&iport->state_lock);
	init_completion(&iport->start_complete);
	iport->isci_host = ihost;
	isci_port_change_state(iport, isci_freed);
}

/**
 * isci_port_get_state() - This function gets the status of the port object.
 * @isci_port: This parameter points to the isci_port object
 *
 * status of the object as an isci_status enum.
 */
enum isci_status isci_port_get_state(
	struct isci_port *isci_port)
{
	return isci_port->status;
}

void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
{
	struct isci_host *ihost = iport->owning_controller;

	/* notify the user. */
	isci_port_bc_change_received(ihost, iport, iphy);
}

int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
				 struct isci_phy *iphy)
{
	unsigned long flags;
	enum sci_status status;
	int ret = TMF_RESP_FUNC_COMPLETE;

	dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
		__func__, iport);

	init_completion(&iport->hard_reset_complete);

	spin_lock_irqsave(&ihost->scic_lock, flags);

	#define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
	status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status == SCI_SUCCESS) {
		wait_for_completion(&iport->hard_reset_complete);

		dev_dbg(&ihost->pdev->dev,
			"%s: iport = %p; hard reset completion\n",
			__func__, iport);

		if (iport->hard_reset_status != SCI_SUCCESS) {
			ret = TMF_RESP_FUNC_FAILED;

			dev_err(&ihost->pdev->dev,
				"%s: iport = %p; hard reset failed (0x%x)\n",
				__func__, iport, iport->hard_reset_status);
		}
	} else {
		ret = TMF_RESP_FUNC_FAILED;

		dev_err(&ihost->pdev->dev,
			"%s: iport = %p; sci_port_hard_reset call"
			" failed 0x%x\n",
			__func__, iport, status);

	}

	/* If the hard reset for the port has failed, consider this
	 * the same as link failures on all phys in the port.
	 */
	if (ret != TMF_RESP_FUNC_COMPLETE) {

		dev_err(&ihost->pdev->dev,
			"%s: iport = %p; hard reset failed "
			"(0x%x) - driving explicit link fail for all phys\n",
			__func__, iport, iport->hard_reset_status);
	}
	return ret;
}

/**
 * isci_port_deformed() - This function is called by libsas when a port becomes
 *    inactive.
 * @phy: This parameter specifies the libsas phy with the inactive port.
 *
 */
void isci_port_deformed(struct asd_sas_phy *phy)
{
	pr_debug("%s: sas_phy = %p\n", __func__, phy);
}

/**
 * isci_port_formed() - This function is called by libsas when a port becomes
 *    active.
 * @phy: This parameter specifies the libsas phy with the active port.
 *
 */
void isci_port_formed(struct asd_sas_phy *phy)
{
	pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port);
}
