1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (c) 2015 Linaro Ltd.
4  * Copyright (c) 2015 Hisilicon Limited.
5  */
6 
7 #include "hisi_sas.h"
8 #define DRV_NAME "hisi_sas"
9 
10 #define DEV_IS_GONE(dev) \
11 	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
12 
13 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
14 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
15 				void *funcdata);
16 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
17 				  struct domain_device *device);
18 static void hisi_sas_dev_gone(struct domain_device *device);
19 
20 struct hisi_sas_internal_abort_data {
21 	bool rst_ha_timeout; /* reset the HA for timeout */
22 };
23 
24 static u8 hisi_sas_get_ata_protocol_from_tf(struct ata_queued_cmd *qc)
25 {
26 	if (!qc)
27 		return HISI_SAS_SATA_PROTOCOL_PIO;
28 
29 	switch (qc->tf.protocol) {
30 	case ATA_PROT_NODATA:
31 		return HISI_SAS_SATA_PROTOCOL_NONDATA;
32 	case ATA_PROT_PIO:
33 		return HISI_SAS_SATA_PROTOCOL_PIO;
34 	case ATA_PROT_DMA:
35 		return HISI_SAS_SATA_PROTOCOL_DMA;
36 	case ATA_PROT_NCQ_NODATA:
37 	case ATA_PROT_NCQ:
38 		return HISI_SAS_SATA_PROTOCOL_FPDMA;
39 	default:
40 		return HISI_SAS_SATA_PROTOCOL_PIO;
41 	}
42 }
43 
44 u8 hisi_sas_get_ata_protocol(struct sas_task *task)
45 {
46 	struct host_to_dev_fis *fis = &task->ata_task.fis;
47 	struct ata_queued_cmd *qc = task->uldd_task;
48 	int direction = task->data_dir;
49 
50 	switch (fis->command) {
51 	case ATA_CMD_FPDMA_WRITE:
52 	case ATA_CMD_FPDMA_READ:
53 	case ATA_CMD_FPDMA_RECV:
54 	case ATA_CMD_FPDMA_SEND:
55 	case ATA_CMD_NCQ_NON_DATA:
56 		return HISI_SAS_SATA_PROTOCOL_FPDMA;
57 
58 	case ATA_CMD_DOWNLOAD_MICRO:
59 	case ATA_CMD_ID_ATA:
60 	case ATA_CMD_PMP_READ:
61 	case ATA_CMD_READ_LOG_EXT:
62 	case ATA_CMD_PIO_READ:
63 	case ATA_CMD_PIO_READ_EXT:
64 	case ATA_CMD_PMP_WRITE:
65 	case ATA_CMD_WRITE_LOG_EXT:
66 	case ATA_CMD_PIO_WRITE:
67 	case ATA_CMD_PIO_WRITE_EXT:
68 		return HISI_SAS_SATA_PROTOCOL_PIO;
69 
70 	case ATA_CMD_DSM:
71 	case ATA_CMD_DOWNLOAD_MICRO_DMA:
72 	case ATA_CMD_PMP_READ_DMA:
73 	case ATA_CMD_PMP_WRITE_DMA:
74 	case ATA_CMD_READ:
75 	case ATA_CMD_READ_EXT:
76 	case ATA_CMD_READ_LOG_DMA_EXT:
77 	case ATA_CMD_READ_STREAM_DMA_EXT:
78 	case ATA_CMD_TRUSTED_RCV_DMA:
79 	case ATA_CMD_TRUSTED_SND_DMA:
80 	case ATA_CMD_WRITE:
81 	case ATA_CMD_WRITE_EXT:
82 	case ATA_CMD_WRITE_FUA_EXT:
83 	case ATA_CMD_WRITE_QUEUED:
84 	case ATA_CMD_WRITE_LOG_DMA_EXT:
85 	case ATA_CMD_WRITE_STREAM_DMA_EXT:
86 	case ATA_CMD_ZAC_MGMT_IN:
87 		return HISI_SAS_SATA_PROTOCOL_DMA;
88 
89 	case ATA_CMD_CHK_POWER:
90 	case ATA_CMD_DEV_RESET:
91 	case ATA_CMD_EDD:
92 	case ATA_CMD_FLUSH:
93 	case ATA_CMD_FLUSH_EXT:
94 	case ATA_CMD_VERIFY:
95 	case ATA_CMD_VERIFY_EXT:
96 	case ATA_CMD_SET_FEATURES:
97 	case ATA_CMD_STANDBY:
98 	case ATA_CMD_STANDBYNOW1:
99 	case ATA_CMD_ZAC_MGMT_OUT:
100 		return HISI_SAS_SATA_PROTOCOL_NONDATA;
101 
102 	case ATA_CMD_SET_MAX:
103 		switch (fis->features) {
104 		case ATA_SET_MAX_PASSWD:
105 		case ATA_SET_MAX_LOCK:
106 			return HISI_SAS_SATA_PROTOCOL_PIO;
107 
108 		case ATA_SET_MAX_PASSWD_DMA:
109 		case ATA_SET_MAX_UNLOCK_DMA:
110 			return HISI_SAS_SATA_PROTOCOL_DMA;
111 
112 		default:
113 			return HISI_SAS_SATA_PROTOCOL_NONDATA;
114 		}
115 
116 	default:
117 	{
118 		if (direction == DMA_NONE)
119 			return HISI_SAS_SATA_PROTOCOL_NONDATA;
120 		return hisi_sas_get_ata_protocol_from_tf(qc);
121 	}
122 	}
123 }
124 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
125 
126 void hisi_sas_sata_done(struct sas_task *task,
127 			    struct hisi_sas_slot *slot)
128 {
129 	struct task_status_struct *ts = &task->task_status;
130 	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
131 	struct hisi_sas_status_buffer *status_buf =
132 			hisi_sas_status_buf_addr_mem(slot);
133 	u8 *iu = &status_buf->iu[0];
134 	struct dev_to_host_fis *d2h =  (struct dev_to_host_fis *)iu;
135 
136 	resp->frame_len = sizeof(struct dev_to_host_fis);
137 	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
138 
139 	ts->buf_valid_size = sizeof(*resp);
140 }
141 EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
142 
143 /*
144  * This function assumes linkrate mask fits in 8 bits, which it
145  * does for all HW versions supported.
146  */
147 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
148 {
149 	u8 rate = 0;
150 	int i;
151 
152 	max -= SAS_LINK_RATE_1_5_GBPS;
153 	for (i = 0; i <= max; i++)
154 		rate |= 1 << (i * 2);
155 	return rate;
156 }
157 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
158 
159 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
160 {
161 	return device->port->ha->lldd_ha;
162 }
163 
164 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
165 {
166 	return container_of(sas_port, struct hisi_sas_port, sas_port);
167 }
168 EXPORT_SYMBOL_GPL(to_hisi_sas_port);
169 
170 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
171 {
172 	int phy_no;
173 
174 	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
175 		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
176 }
177 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
178 
179 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
180 {
181 	void *bitmap = hisi_hba->slot_index_tags;
182 
183 	__clear_bit(slot_idx, bitmap);
184 }
185 
186 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
187 {
188 	if (hisi_hba->hw->slot_index_alloc ||
189 	    slot_idx < HISI_SAS_RESERVED_IPTT) {
190 		spin_lock(&hisi_hba->lock);
191 		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
192 		spin_unlock(&hisi_hba->lock);
193 	}
194 }
195 
196 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
197 {
198 	void *bitmap = hisi_hba->slot_index_tags;
199 
200 	__set_bit(slot_idx, bitmap);
201 }
202 
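/*
 * Allocate an IPTT for a command: commands backed by a block layer
 * request reuse the request tag (offset past the reserved region),
 * while internal commands take a free bit from the reserved IPTT
 * bitmap under hisi_hba->lock, scanning from the last allocated index.
 */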
203 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
204 				     struct request *rq)
205 {
206 	int index;
207 	void *bitmap = hisi_hba->slot_index_tags;
208 
209 	if (rq)
210 		return rq->tag + HISI_SAS_RESERVED_IPTT;
211 
212 	spin_lock(&hisi_hba->lock);
213 	index = find_next_zero_bit(bitmap, HISI_SAS_RESERVED_IPTT,
214 				   hisi_hba->last_slot_index + 1);
215 	if (index >= HISI_SAS_RESERVED_IPTT) {
216 		index = find_next_zero_bit(bitmap,
217 				HISI_SAS_RESERVED_IPTT,
218 				0);
219 		if (index >= HISI_SAS_RESERVED_IPTT) {
220 			spin_unlock(&hisi_hba->lock);
221 			return -SAS_QUEUE_FULL;
222 		}
223 	}
224 	hisi_sas_slot_index_set(hisi_hba, index);
225 	hisi_hba->last_slot_index = index;
226 	spin_unlock(&hisi_hba->lock);
227 
228 	return index;
229 }
230 
231 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
232 			     struct hisi_sas_slot *slot, bool need_lock)
233 {
234 	int device_id = slot->device_id;
235 	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];
236 
237 	if (task) {
238 		struct device *dev = hisi_hba->dev;
239 
240 		if (!task->lldd_task)
241 			return;
242 
243 		task->lldd_task = NULL;
244 
245 		if (!sas_protocol_ata(task->task_proto)) {
246 			if (slot->n_elem) {
247 				if (task->task_proto & SAS_PROTOCOL_SSP)
248 					dma_unmap_sg(dev, task->scatter,
249 						     task->num_scatter,
250 						     task->data_dir);
251 				else
252 					dma_unmap_sg(dev, &task->smp_task.smp_req,
253 						     1, DMA_TO_DEVICE);
254 			}
255 			if (slot->n_elem_dif) {
256 				struct sas_ssp_task *ssp_task = &task->ssp_task;
257 				struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
258 
259 				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
260 					     scsi_prot_sg_count(scsi_cmnd),
261 					     task->data_dir);
262 			}
263 		}
264 	}
265 
266 	if (need_lock) {
267 		spin_lock(&sas_dev->lock);
268 		list_del_init(&slot->entry);
269 		spin_unlock(&sas_dev->lock);
270 	} else {
271 		list_del_init(&slot->entry);
272 	}
273 
274 	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
275 
276 	hisi_sas_slot_index_free(hisi_hba, slot->idx);
277 }
278 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
279 
280 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
281 				  struct hisi_sas_slot *slot)
282 {
283 	hisi_hba->hw->prep_smp(hisi_hba, slot);
284 }
285 
286 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
287 				  struct hisi_sas_slot *slot)
288 {
289 	hisi_hba->hw->prep_ssp(hisi_hba, slot);
290 }
291 
292 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
293 				  struct hisi_sas_slot *slot)
294 {
295 	hisi_hba->hw->prep_stp(hisi_hba, slot);
296 }
297 
298 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
299 				     struct hisi_sas_slot *slot)
300 {
301 	hisi_hba->hw->prep_abort(hisi_hba, slot);
302 }
303 
304 static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
305 			       struct sas_task *task, int n_elem)
306 {
307 	struct device *dev = hisi_hba->dev;
308 
309 	if (!sas_protocol_ata(task->task_proto) && n_elem) {
310 		if (task->num_scatter) {
311 			dma_unmap_sg(dev, task->scatter, task->num_scatter,
312 				     task->data_dir);
313 		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
314 			dma_unmap_sg(dev, &task->smp_task.smp_req,
315 				     1, DMA_TO_DEVICE);
316 		}
317 	}
318 }
319 
320 static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
321 			    struct sas_task *task, int *n_elem)
322 {
323 	struct device *dev = hisi_hba->dev;
324 	int rc;
325 
326 	if (sas_protocol_ata(task->task_proto)) {
327 		*n_elem = task->num_scatter;
328 	} else {
329 		unsigned int req_len;
330 
331 		if (task->num_scatter) {
332 			*n_elem = dma_map_sg(dev, task->scatter,
333 					     task->num_scatter, task->data_dir);
334 			if (!*n_elem) {
335 				rc = -ENOMEM;
336 				goto prep_out;
337 			}
338 		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
339 			*n_elem = dma_map_sg(dev, &task->smp_task.smp_req,
340 					     1, DMA_TO_DEVICE);
341 			if (!*n_elem) {
342 				rc = -ENOMEM;
343 				goto prep_out;
344 			}
345 			req_len = sg_dma_len(&task->smp_task.smp_req);
346 			if (req_len & 0x3) {
347 				rc = -EINVAL;
348 				goto err_out_dma_unmap;
349 			}
350 		}
351 	}
352 
353 	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
354 		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
355 			*n_elem);
356 		rc = -EINVAL;
357 		goto err_out_dma_unmap;
358 	}
359 	return 0;
360 
361 err_out_dma_unmap:
362 	/* It would be better to call dma_unmap_sg() here, but it's messy */
363 	hisi_sas_dma_unmap(hisi_hba, task, *n_elem);
364 prep_out:
365 	return rc;
366 }
367 
368 static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
369 				   struct sas_task *task, int n_elem_dif)
370 {
371 	struct device *dev = hisi_hba->dev;
372 
373 	if (n_elem_dif) {
374 		struct sas_ssp_task *ssp_task = &task->ssp_task;
375 		struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
376 
377 		dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
378 			     scsi_prot_sg_count(scsi_cmnd),
379 			     task->data_dir);
380 	}
381 }
382 
383 static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
384 				int *n_elem_dif, struct sas_task *task)
385 {
386 	struct device *dev = hisi_hba->dev;
387 	struct sas_ssp_task *ssp_task;
388 	struct scsi_cmnd *scsi_cmnd;
389 	int rc;
390 
391 	if (task->num_scatter) {
392 		ssp_task = &task->ssp_task;
393 		scsi_cmnd = ssp_task->cmd;
394 
395 		if (scsi_prot_sg_count(scsi_cmnd)) {
396 			*n_elem_dif = dma_map_sg(dev,
397 						 scsi_prot_sglist(scsi_cmnd),
398 						 scsi_prot_sg_count(scsi_cmnd),
399 						 task->data_dir);
400 
401 			if (!*n_elem_dif)
402 				return -ENOMEM;
403 
404 			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
405 				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
406 					*n_elem_dif);
407 				rc = -EINVAL;
408 				goto err_out_dif_dma_unmap;
409 			}
410 		}
411 	}
412 
413 	return 0;
414 
415 err_out_dif_dma_unmap:
416 	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
417 		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
418 	return rc;
419 }
420 
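/*
 * Claim a delivery queue slot, link the slot to its device, build the
 * command header for the task's protocol and, once the slot memories
 * are observable (smp_wmb() before slot->ready), ring the doorbell via
 * hw->start_delivery().
 */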
421 static
422 void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
423 			   struct hisi_sas_slot *slot,
424 			   struct hisi_sas_dq *dq,
425 			   struct hisi_sas_device *sas_dev)
426 {
427 	struct hisi_sas_cmd_hdr *cmd_hdr_base;
428 	int dlvry_queue_slot, dlvry_queue;
429 	struct sas_task *task = slot->task;
430 	int wr_q_index;
431 
432 	spin_lock(&dq->lock);
433 	wr_q_index = dq->wr_point;
434 	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
435 	list_add_tail(&slot->delivery, &dq->list);
436 	spin_unlock(&dq->lock);
437 	spin_lock(&sas_dev->lock);
438 	list_add_tail(&slot->entry, &sas_dev->list);
439 	spin_unlock(&sas_dev->lock);
440 
441 	dlvry_queue = dq->id;
442 	dlvry_queue_slot = wr_q_index;
443 
444 	slot->device_id = sas_dev->device_id;
445 	slot->dlvry_queue = dlvry_queue;
446 	slot->dlvry_queue_slot = dlvry_queue_slot;
447 	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
448 	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
449 
450 	task->lldd_task = slot;
451 
452 	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
453 	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
454 	memset(hisi_sas_status_buf_addr_mem(slot), 0,
455 	       sizeof(struct hisi_sas_err_record));
456 
457 	switch (task->task_proto) {
458 	case SAS_PROTOCOL_SMP:
459 		hisi_sas_task_prep_smp(hisi_hba, slot);
460 		break;
461 	case SAS_PROTOCOL_SSP:
462 		hisi_sas_task_prep_ssp(hisi_hba, slot);
463 		break;
464 	case SAS_PROTOCOL_SATA:
465 	case SAS_PROTOCOL_STP:
466 	case SAS_PROTOCOL_STP_ALL:
467 		hisi_sas_task_prep_ata(hisi_hba, slot);
468 		break;
469 	case SAS_PROTOCOL_INTERNAL_ABORT:
470 		hisi_sas_task_prep_abort(hisi_hba, slot);
471 		break;
472 	default:
473 		return;
474 	}
475 
476 	/* Make slot memories observable before marking as ready */
477 	smp_wmb();
478 	WRITE_ONCE(slot->ready, 1);
479 
480 	spin_lock(&dq->lock);
481 	hisi_hba->hw->start_delivery(dq);
482 	spin_unlock(&dq->lock);
483 }
484 
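/*
 * Main delivery path called by libsas: pick a delivery queue (from the
 * block layer hctx mapping when a request exists, otherwise queue 0 or
 * the current CPU's mapping), map DMA, allocate a slot and deliver it.
 */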
485 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
486 {
487 	int n_elem = 0, n_elem_dif = 0;
488 	struct domain_device *device = task->dev;
489 	struct asd_sas_port *sas_port = device->port;
490 	struct hisi_sas_device *sas_dev = device->lldd_dev;
491 	bool internal_abort = sas_is_internal_abort(task);
492 	struct hisi_sas_dq *dq = NULL;
493 	struct hisi_sas_port *port;
494 	struct hisi_hba *hisi_hba;
495 	struct hisi_sas_slot *slot;
496 	struct request *rq = NULL;
497 	struct device *dev;
498 	int rc;
499 
500 	if (!sas_port) {
501 		struct task_status_struct *ts = &task->task_status;
502 
503 		ts->resp = SAS_TASK_UNDELIVERED;
504 		ts->stat = SAS_PHY_DOWN;
505 		/*
506 		 * libsas will still use dev->port for SATA devices, so
507 		 * do not call task_done() for them here.
508 		 */
509 		if (device->dev_type != SAS_SATA_DEV && !internal_abort)
510 			task->task_done(task);
511 		return -ECOMM;
512 	}
513 
514 	hisi_hba = dev_to_hisi_hba(device);
515 	dev = hisi_hba->dev;
516 
517 	switch (task->task_proto) {
518 	case SAS_PROTOCOL_SSP:
519 	case SAS_PROTOCOL_SMP:
520 	case SAS_PROTOCOL_SATA:
521 	case SAS_PROTOCOL_STP:
522 	case SAS_PROTOCOL_STP_ALL:
523 		if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
524 			if (!gfpflags_allow_blocking(gfp_flags))
525 				return -EINVAL;
526 
527 			down(&hisi_hba->sem);
528 			up(&hisi_hba->sem);
529 		}
530 
531 		if (DEV_IS_GONE(sas_dev)) {
532 			if (sas_dev)
533 				dev_info(dev, "task prep: device %d not ready\n",
534 					 sas_dev->device_id);
535 			else
536 				dev_info(dev, "task prep: device %016llx not ready\n",
537 					 SAS_ADDR(device->sas_addr));
538 
539 			return -ECOMM;
540 		}
541 
542 		port = to_hisi_sas_port(sas_port);
543 		if (!port->port_attached) {
544 			dev_info(dev, "task prep: %s port%d not attach device\n",
545 				 dev_is_sata(device) ? "SATA/STP" : "SAS",
546 				 device->port->id);
547 
548 			return -ECOMM;
549 		}
550 
551 		rq = sas_task_find_rq(task);
552 		if (rq) {
553 			unsigned int dq_index;
554 			u32 blk_tag;
555 
556 			blk_tag = blk_mq_unique_tag(rq);
557 			dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
558 			dq = &hisi_hba->dq[dq_index];
559 		} else {
560 			int queue;
561 
562 			if (hisi_hba->iopoll_q_cnt) {
563 				/*
564 				 * Use interrupt queue (queue 0) to deliver and complete
565 				 * internal IOs of libsas or libata when there is at least
566 				 * one iopoll queue
567 				 */
568 				queue = 0;
569 			} else {
570 				struct Scsi_Host *shost = hisi_hba->shost;
571 				struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
572 
573 				queue = qmap->mq_map[raw_smp_processor_id()];
574 			}
575 			dq = &hisi_hba->dq[queue];
576 		}
577 		break;
578 	case SAS_PROTOCOL_INTERNAL_ABORT:
579 		if (!hisi_hba->hw->prep_abort)
580 			return TMF_RESP_FUNC_FAILED;
581 
582 		if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
583 			return -EIO;
584 
585 		hisi_hba = dev_to_hisi_hba(device);
586 
587 		if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
588 			return -EINVAL;
589 
590 		port = to_hisi_sas_port(sas_port);
591 		dq = &hisi_hba->dq[task->abort_task.qid];
592 		break;
593 	default:
594 		dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
595 			task->task_proto);
596 		return -EINVAL;
597 	}
598 
599 	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem);
600 	if (rc < 0)
601 		goto prep_out;
602 
603 	if (!sas_protocol_ata(task->task_proto)) {
604 		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
605 		if (rc < 0)
606 			goto err_out_dma_unmap;
607 	}
608 
609 	if (!internal_abort && hisi_hba->hw->slot_index_alloc)
610 		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
611 	else
612 		rc = hisi_sas_slot_index_alloc(hisi_hba, rq);
613 
614 	if (rc < 0)
615 		goto err_out_dif_dma_unmap;
616 
617 	slot = &hisi_hba->slot_info[rc];
618 	slot->n_elem = n_elem;
619 	slot->n_elem_dif = n_elem_dif;
620 	slot->task = task;
621 	slot->port = port;
622 
623 	slot->tmf = task->tmf;
624 	slot->is_internal = !!task->tmf || internal_abort;
625 
626 	/* protect task_prep and start_delivery sequence */
627 	hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev);
628 
629 	return 0;
630 
631 err_out_dif_dma_unmap:
632 	if (!sas_protocol_ata(task->task_proto))
633 		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
634 err_out_dma_unmap:
635 	hisi_sas_dma_unmap(hisi_hba, task, n_elem);
636 prep_out:
637 	dev_err(dev, "task exec: failed[%d]!\n", rc);
638 	return rc;
639 }
640 
641 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no,
642 				 gfp_t gfp_flags)
643 {
644 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
645 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
646 
647 	if (!phy->phy_attached)
648 		return;
649 
650 	sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);
651 
652 	if (sas_phy->phy) {
653 		struct sas_phy *sphy = sas_phy->phy;
654 
655 		sphy->negotiated_linkrate = sas_phy->linkrate;
656 		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
657 		sphy->maximum_linkrate_hw =
658 			hisi_hba->hw->phy_get_max_linkrate();
659 		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
660 			sphy->minimum_linkrate = phy->minimum_linkrate;
661 
662 		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
663 			sphy->maximum_linkrate = phy->maximum_linkrate;
664 	}
665 
666 	if (phy->phy_type & PORT_TYPE_SAS) {
667 		struct sas_identify_frame *id;
668 
669 		id = (struct sas_identify_frame *)phy->frame_rcvd;
670 		id->dev_type = phy->identify.device_type;
671 		id->initiator_bits = SAS_PROTOCOL_ALL;
672 		id->target_bits = phy->identify.target_port_protocols;
673 	} else if (phy->phy_type & PORT_TYPE_SATA) {
674 		/* Nothing */
675 	}
676 
677 	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
678 	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
679 }
680 
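/*
 * Find a free entry in the device table, searching circularly from the
 * entry after last_dev_id, and initialise it for the new domain device.
 */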
681 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
682 {
683 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
684 	struct hisi_sas_device *sas_dev = NULL;
685 	int last = hisi_hba->last_dev_id;
686 	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
687 	int i;
688 
689 	spin_lock(&hisi_hba->lock);
690 	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
691 		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
692 			int queue = i % hisi_hba->queue_count;
693 			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
694 
695 			hisi_hba->devices[i].device_id = i;
696 			sas_dev = &hisi_hba->devices[i];
697 			sas_dev->dev_status = HISI_SAS_DEV_INIT;
698 			sas_dev->dev_type = device->dev_type;
699 			sas_dev->hisi_hba = hisi_hba;
700 			sas_dev->sas_device = device;
701 			sas_dev->dq = dq;
702 			spin_lock_init(&sas_dev->lock);
703 			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
704 			break;
705 		}
706 		i++;
707 	}
708 	hisi_hba->last_dev_id = i;
709 	spin_unlock(&hisi_hba->lock);
710 
711 	return sas_dev;
712 }
713 
714 static void hisi_sas_sync_poll_cq(struct hisi_sas_cq *cq)
715 {
716 	/* make sure CQ entries being processed are processed to completion */
717 	spin_lock(&cq->poll_lock);
718 	spin_unlock(&cq->poll_lock);
719 }
720 
721 static bool hisi_sas_queue_is_poll(struct hisi_sas_cq *cq)
722 {
723 	struct hisi_hba *hisi_hba = cq->hisi_hba;
724 
725 	if (cq->id < hisi_hba->queue_count - hisi_hba->iopoll_q_cnt)
726 		return false;
727 	return true;
728 }
729 
730 static void hisi_sas_sync_cq(struct hisi_sas_cq *cq)
731 {
732 	if (hisi_sas_queue_is_poll(cq))
733 		hisi_sas_sync_poll_cq(cq);
734 	else
735 		synchronize_irq(cq->irq_no);
736 }
737 
738 void hisi_sas_sync_poll_cqs(struct hisi_hba *hisi_hba)
739 {
740 	int i;
741 
742 	for (i = 0; i < hisi_hba->queue_count; i++) {
743 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
744 
745 		if (hisi_sas_queue_is_poll(cq))
746 			hisi_sas_sync_poll_cq(cq);
747 	}
748 }
749 EXPORT_SYMBOL_GPL(hisi_sas_sync_poll_cqs);
750 
751 void hisi_sas_sync_cqs(struct hisi_hba *hisi_hba)
752 {
753 	int i;
754 
755 	for (i = 0; i < hisi_hba->queue_count; i++) {
756 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
757 
758 		hisi_sas_sync_cq(cq);
759 	}
760 }
761 EXPORT_SYMBOL_GPL(hisi_sas_sync_cqs);
762 
763 static void hisi_sas_tmf_aborted(struct sas_task *task)
764 {
765 	struct hisi_sas_slot *slot = task->lldd_task;
766 	struct domain_device *device = task->dev;
767 	struct hisi_sas_device *sas_dev = device->lldd_dev;
768 	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
769 
770 	if (slot) {
771 		struct hisi_sas_cq *cq =
772 			   &hisi_hba->cq[slot->dlvry_queue];
773 		/*
774 		 * sync irq or poll queue to avoid free'ing task
775 		 * before using task in IO completion
776 		 */
777 		hisi_sas_sync_cq(cq);
778 		slot->task = NULL;
779 	}
780 }
781 
782 #define HISI_SAS_DISK_RECOVER_CNT 3
783 static int hisi_sas_init_device(struct domain_device *device)
784 {
785 	int rc = TMF_RESP_FUNC_COMPLETE;
786 	struct scsi_lun lun;
787 	int retry = HISI_SAS_DISK_RECOVER_CNT;
788 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
789 
790 	switch (device->dev_type) {
791 	case SAS_END_DEVICE:
792 		int_to_scsilun(0, &lun);
793 
794 		while (retry-- > 0) {
795 			rc = sas_abort_task_set(device, lun.scsi_lun);
796 			if (rc == TMF_RESP_FUNC_COMPLETE) {
797 				hisi_sas_release_task(hisi_hba, device);
798 				break;
799 			}
800 		}
801 		break;
802 	case SAS_SATA_DEV:
803 	case SAS_SATA_PM:
804 	case SAS_SATA_PM_PORT:
805 	case SAS_SATA_PENDING:
806 		/*
807 		 * If an expander is swapped when a SATA disk is attached then
808 		 * we should issue a hard reset to clear previous affiliation
809 		 * of STP target port, see SPL (chapter 6.19.4).
810 		 *
811 		 * However we don't need to issue a hard reset here for these
812 		 * reasons:
813 		 * a. When probing the device, libsas/libata already issues a
814 		 * hard reset in sas_probe_sata() -> ata_port_probe().
815 		 * Note that in hisi_sas_debug_I_T_nexus_reset() we take care
816 		 * to issue a hard reset by checking the dev status (== INIT).
817 		 * b. When resetting the controller, this is simply unnecessary.
818 		 */
819 		while (retry-- > 0) {
820 			rc = hisi_sas_softreset_ata_disk(device);
821 			if (!rc)
822 				break;
823 		}
824 		break;
825 	default:
826 		break;
827 	}
828 
829 	return rc;
830 }
831 
832 int hisi_sas_sdev_init(struct scsi_device *sdev)
833 {
834 	struct domain_device *ddev = sdev_to_domain_dev(sdev);
835 	struct hisi_sas_device *sas_dev = ddev->lldd_dev;
836 	int rc;
837 
838 	rc = sas_sdev_init(sdev);
839 	if (rc)
840 		return rc;
841 
842 	rc = hisi_sas_init_device(ddev);
843 	if (rc)
844 		return rc;
845 	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
846 	return 0;
847 }
848 EXPORT_SYMBOL_GPL(hisi_sas_sdev_init);
849 
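/*
 * libsas .lldd_dev_found handler: allocate a hisi_sas_device, program
 * the ITCT, and for expander-attached devices check that the expander
 * really has a phy attached to this device.
 */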
850 static int hisi_sas_dev_found(struct domain_device *device)
851 {
852 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
853 	struct domain_device *parent_dev = device->parent;
854 	struct hisi_sas_device *sas_dev;
855 	struct device *dev = hisi_hba->dev;
856 	int rc;
857 
858 	if (hisi_hba->hw->alloc_dev)
859 		sas_dev = hisi_hba->hw->alloc_dev(device);
860 	else
861 		sas_dev = hisi_sas_alloc_dev(device);
862 	if (!sas_dev) {
863 		dev_err(dev, "fail alloc dev: max support %d devices\n",
864 			HISI_SAS_MAX_DEVICES);
865 		return -EINVAL;
866 	}
867 
868 	device->lldd_dev = sas_dev;
869 	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
870 
871 	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
872 		int phy_no;
873 
874 		phy_no = sas_find_attached_phy_id(&parent_dev->ex_dev, device);
875 		if (phy_no < 0) {
876 			dev_info(dev, "dev found: no attached dev:%016llx at ex:%016llx\n",
878 				 SAS_ADDR(device->sas_addr),
879 				 SAS_ADDR(parent_dev->sas_addr));
880 			rc = phy_no;
881 			goto err_out;
882 		}
883 	}
884 
885 	dev_info(dev, "dev[%d:%x] found\n",
886 		sas_dev->device_id, sas_dev->dev_type);
887 
888 	return 0;
889 
890 err_out:
891 	hisi_sas_dev_gone(device);
892 	return rc;
893 }
894 
895 int hisi_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
896 {
897 	struct domain_device *dev = sdev_to_domain_dev(sdev);
898 	int ret = sas_sdev_configure(sdev, lim);
899 
900 	if (ret)
901 		return ret;
902 	if (!dev_is_sata(dev))
903 		sas_change_queue_depth(sdev, 64);
904 
905 	return 0;
906 }
907 EXPORT_SYMBOL_GPL(hisi_sas_sdev_configure);
908 
909 void hisi_sas_scan_start(struct Scsi_Host *shost)
910 {
911 	struct hisi_hba *hisi_hba = shost_priv(shost);
912 
913 	hisi_hba->hw->phys_init(hisi_hba);
914 }
915 EXPORT_SYMBOL_GPL(hisi_sas_scan_start);
916 
917 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
918 {
919 	struct hisi_hba *hisi_hba = shost_priv(shost);
920 	struct sas_ha_struct *sha = &hisi_hba->sha;
921 
922 	/* Wait for PHY up interrupt to occur */
923 	if (time < HZ)
924 		return 0;
925 
926 	sas_drain_work(sha);
927 	return 1;
928 }
929 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
930 
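/*
 * Common phy-up handling: if the hw port id of a directly attached
 * device has changed, mark the device gone and schedule a link reset;
 * otherwise finish bringing the phy up and report it to libsas via
 * hisi_sas_bytes_dmaed().
 */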
931 static void hisi_sas_phyup_work_common(struct work_struct *work,
932 		enum hisi_sas_phy_event event)
933 {
934 	struct hisi_sas_phy *phy =
935 		container_of(work, typeof(*phy), works[event]);
936 	struct hisi_hba *hisi_hba = phy->hisi_hba;
937 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
938 	struct asd_sas_port *sas_port = sas_phy->port;
939 	struct hisi_sas_port *port = phy->port;
940 	struct device *dev = hisi_hba->dev;
941 	struct domain_device *port_dev;
942 	int phy_no = sas_phy->id;
943 
944 	if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) &&
945 	    sas_port && port && (port->id != phy->port_id)) {
946 		dev_info(dev, "phy%d's hw port id changed from %d to %llu\n",
947 				phy_no, port->id, phy->port_id);
948 		port_dev = sas_port->port_dev;
949 		if (port_dev && !dev_is_expander(port_dev->dev_type)) {
950 			/*
951 			 * Set the device state to gone to block
952 			 * sending IO to the device.
953 			 */
954 			set_bit(SAS_DEV_GONE, &port_dev->state);
955 			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
956 			return;
957 		}
958 	}
959 
960 	phy->wait_phyup_cnt = 0;
961 	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
962 		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
963 	hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
964 }
965 
966 static void hisi_sas_phyup_work(struct work_struct *work)
967 {
968 	hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP);
969 }
970 
971 static void hisi_sas_linkreset_work(struct work_struct *work)
972 {
973 	struct hisi_sas_phy *phy =
974 		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
975 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
976 
977 	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
978 }
979 
980 static void hisi_sas_phyup_pm_work(struct work_struct *work)
981 {
982 	struct hisi_sas_phy *phy =
983 		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);
984 	struct hisi_hba *hisi_hba = phy->hisi_hba;
985 	struct device *dev = hisi_hba->dev;
986 
987 	hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
988 	pm_runtime_put_sync(dev);
989 }
990 
991 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
992 	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
993 	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
994 	[HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work,
995 };
996 
997 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
998 				enum hisi_sas_phy_event event)
999 {
1000 	struct hisi_hba *hisi_hba = phy->hisi_hba;
1001 
1002 	if (WARN_ON(event >= HISI_PHYES_NUM))
1003 		return false;
1004 
1005 	return queue_work(hisi_hba->wq, &phy->works[event]);
1006 }
1007 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
1008 
1009 static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
1010 {
1011 	struct hisi_sas_phy *phy = from_timer(phy, t, timer);
1012 	struct hisi_hba *hisi_hba = phy->hisi_hba;
1013 	struct device *dev = hisi_hba->dev;
1014 	int phy_no = phy->sas_phy.id;
1015 
1016 	dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
1017 	hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
1018 }
1019 
1020 #define HISI_SAS_WAIT_PHYUP_RETRIES	10
1021 
1022 void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
1023 {
1024 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1025 	struct device *dev = hisi_hba->dev;
1026 	unsigned long flags;
1027 
1028 	dev_dbg(dev, "phy%d OOB ready\n", phy_no);
1029 	spin_lock_irqsave(&phy->lock, flags);
1030 	if (phy->phy_attached) {
1031 		spin_unlock_irqrestore(&phy->lock, flags);
1032 		return;
1033 	}
1034 
1035 	if (!timer_pending(&phy->timer)) {
1036 		if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
1037 			phy->wait_phyup_cnt++;
1038 			phy->timer.expires = jiffies +
1039 					     HISI_SAS_WAIT_PHYUP_TIMEOUT;
1040 			add_timer(&phy->timer);
1041 			spin_unlock_irqrestore(&phy->lock, flags);
1042 			return;
1043 		}
1044 
1045 		dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
1046 			 phy_no, phy->wait_phyup_cnt);
1047 		phy->wait_phyup_cnt = 0;
1048 	}
1049 	spin_unlock_irqrestore(&phy->lock, flags);
1050 }
1052 EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);
1053 
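/* Initialise the driver and libsas state of one phy at probe time. */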
1054 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
1055 {
1056 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1057 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
1058 	int i;
1059 
1060 	phy->hisi_hba = hisi_hba;
1061 	phy->port = NULL;
1062 	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
1063 	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
1064 	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
1065 	sas_phy->iproto = SAS_PROTOCOL_ALL;
1066 	sas_phy->tproto = 0;
1067 	sas_phy->role = PHY_ROLE_INITIATOR;
1068 	sas_phy->oob_mode = OOB_NOT_CONNECTED;
1069 	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
1070 	sas_phy->id = phy_no;
1071 	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
1072 	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
1073 	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
1074 	sas_phy->lldd_phy = phy;
1075 
1076 	for (i = 0; i < HISI_PHYES_NUM; i++)
1077 		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
1078 
1079 	spin_lock_init(&phy->lock);
1080 
1081 	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
1082 }
1083 
1084 /* Wrapper to ensure we track hisi_sas_phy.enable properly */
1085 void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
1086 {
1087 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1088 	struct asd_sas_phy *aphy = &phy->sas_phy;
1089 	struct sas_phy *sphy = aphy->phy;
1090 	unsigned long flags;
1091 
1092 	spin_lock_irqsave(&phy->lock, flags);
1093 
1094 	if (enable) {
1095 		/* We may have been enabled already; if so, don't touch */
1096 		if (!phy->enable)
1097 			sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
1098 		hisi_hba->hw->phy_start(hisi_hba, phy_no);
1099 	} else {
1100 		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
1101 		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
1102 	}
1103 	phy->enable = enable;
1104 	spin_unlock_irqrestore(&phy->lock, flags);
1105 }
1106 EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);
1107 
1108 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
1109 {
1110 	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
1111 	struct asd_sas_port *sas_port = sas_phy->port;
1112 	struct hisi_sas_port *port;
1113 
1114 	if (!sas_port)
1115 		return;
1116 
1117 	port = to_hisi_sas_port(sas_port);
1118 	port->port_attached = 1;
1119 	port->id = phy->port_id;
1120 	phy->port = port;
1121 	sas_port->lldd_port = port;
1122 }
1123 
1124 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
1125 				     struct hisi_sas_slot *slot, bool need_lock)
1126 {
1127 	if (task) {
1128 		unsigned long flags;
1129 		struct task_status_struct *ts;
1130 
1131 		ts = &task->task_status;
1132 
1133 		ts->resp = SAS_TASK_COMPLETE;
1134 		ts->stat = SAS_ABORTED_TASK;
1135 		spin_lock_irqsave(&task->task_state_lock, flags);
1136 		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
1137 		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
1138 			task->task_state_flags |= SAS_TASK_STATE_DONE;
1139 		spin_unlock_irqrestore(&task->task_state_lock, flags);
1140 	}
1141 
1142 	hisi_sas_slot_task_free(hisi_hba, task, slot, need_lock);
1143 }
1144 
1145 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
1146 			struct domain_device *device)
1147 {
1148 	struct hisi_sas_slot *slot, *slot2;
1149 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1150 
1151 	spin_lock(&sas_dev->lock);
1152 	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
1153 		hisi_sas_do_release_task(hisi_hba, slot->task, slot, false);
1154 
1155 	spin_unlock(&sas_dev->lock);
1156 }
1157 
1158 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
1159 {
1160 	struct hisi_sas_device *sas_dev;
1161 	struct domain_device *device;
1162 	int i;
1163 
1164 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1165 		sas_dev = &hisi_hba->devices[i];
1166 		device = sas_dev->sas_device;
1167 
1168 		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
1169 		    !device)
1170 			continue;
1171 
1172 		hisi_sas_release_task(hisi_hba, device);
1173 	}
1174 }
1175 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
1176 
1177 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
1178 				struct domain_device *device)
1179 {
1180 	if (hisi_hba->hw->dereg_device)
1181 		hisi_hba->hw->dereg_device(hisi_hba, device);
1182 }
1183 
1184 static int
1185 hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev,
1186 				 bool rst_ha_timeout)
1187 {
1188 	struct hisi_sas_internal_abort_data data = { rst_ha_timeout };
1189 	struct domain_device *device = sas_dev->sas_device;
1190 	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
1191 	int i, rc;
1192 
1193 	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
1194 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1195 		const struct cpumask *mask = cq->irq_mask;
1196 
1197 		if (mask && !cpumask_intersects(cpu_online_mask, mask))
1198 			continue;
1199 		rc = sas_execute_internal_abort_dev(device, i, &data);
1200 		if (rc)
1201 			return rc;
1202 	}
1203 
1204 	return 0;
1205 }
1206 
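/*
 * libsas .lldd_dev_gone handler: abort outstanding I/O for the device,
 * deregister it and clear its ITCT (skipped while the controller is
 * resetting), then release the device table entry.
 */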
1207 static void hisi_sas_dev_gone(struct domain_device *device)
1208 {
1209 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1210 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1211 	struct device *dev = hisi_hba->dev;
1212 	int ret = 0;
1213 
1214 	dev_info(dev, "dev[%d:%x] is gone\n",
1215 		 sas_dev->device_id, sas_dev->dev_type);
1216 
1217 	down(&hisi_hba->sem);
1218 	if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
1219 		hisi_sas_internal_task_abort_dev(sas_dev, true);
1220 
1221 		hisi_sas_dereg_device(hisi_hba, device);
1222 
1223 		ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
1224 		device->lldd_dev = NULL;
1225 	}
1226 
1227 	if (hisi_hba->hw->free_device)
1228 		hisi_hba->hw->free_device(sas_dev);
1229 
1230 	/* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */
1231 	if (!ret)
1232 		sas_dev->dev_type = SAS_PHY_UNUSED;
1233 	sas_dev->sas_device = NULL;
1234 	up(&hisi_hba->sem);
1235 }
1236 
1237 static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
1238 			struct sas_phy_linkrates *r)
1239 {
1240 	struct sas_phy_linkrates _r;
1241 
1242 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1243 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
1244 	enum sas_linkrate min, max;
1245 
1246 	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
1247 		return -EINVAL;
1248 
1249 	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
1250 		max = sas_phy->phy->maximum_linkrate;
1251 		min = r->minimum_linkrate;
1252 	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
1253 		max = r->maximum_linkrate;
1254 		min = sas_phy->phy->minimum_linkrate;
1255 	} else
1256 		return -EINVAL;
1257 
1258 	_r.maximum_linkrate = max;
1259 	_r.minimum_linkrate = min;
1260 
1261 	sas_phy->phy->maximum_linkrate = max;
1262 	sas_phy->phy->minimum_linkrate = min;
1263 
1264 	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
1265 	msleep(100);
1266 	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
1267 	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
1268 
1269 	return 0;
1270 }
1271 
1272 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
1273 				void *funcdata)
1274 {
1275 	struct hisi_sas_phy *phy = container_of(sas_phy,
1276 			struct hisi_sas_phy, sas_phy);
1277 	struct sas_ha_struct *sas_ha = sas_phy->ha;
1278 	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1279 	struct device *dev = hisi_hba->dev;
1280 	DECLARE_COMPLETION_ONSTACK(completion);
1281 	int phy_no = sas_phy->id;
1282 	u8 sts = phy->phy_attached;
1283 	int ret = 0;
1284 
1285 	down(&hisi_hba->sem);
1286 	phy->reset_completion = &completion;
1287 
1288 	switch (func) {
1289 	case PHY_FUNC_HARD_RESET:
1290 		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
1291 		break;
1292 
1293 	case PHY_FUNC_LINK_RESET:
1294 		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
1295 		msleep(100);
1296 		hisi_sas_phy_enable(hisi_hba, phy_no, 1);
1297 		break;
1298 
1299 	case PHY_FUNC_DISABLE:
1300 		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
1301 		goto out;
1302 
1303 	case PHY_FUNC_SET_LINK_RATE:
1304 		ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
1305 		break;
1306 
1307 	case PHY_FUNC_GET_EVENTS:
1308 		if (hisi_hba->hw->get_events) {
1309 			hisi_hba->hw->get_events(hisi_hba, phy_no);
1310 			goto out;
1311 		}
1312 		fallthrough;
1313 	case PHY_FUNC_RELEASE_SPINUP_HOLD:
1314 	default:
1315 		ret = -EOPNOTSUPP;
1316 		goto out;
1317 	}
1318 
1319 	if (sts && !wait_for_completion_timeout(&completion,
1320 		HISI_SAS_WAIT_PHYUP_TIMEOUT)) {
1321 		dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
1322 			 phy_no, func);
1323 		if (phy->in_reset)
1324 			ret = -ETIMEDOUT;
1325 	}
1326 
1327 out:
1328 	phy->reset_completion = NULL;
1329 
1330 	up(&hisi_hba->sem);
1331 	return ret;
1332 }
1333 
1334 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
1335 		bool reset, int pmp, u8 *fis)
1336 {
1337 	struct ata_taskfile tf;
1338 
1339 	ata_tf_init(dev, &tf);
1340 	if (reset)
1341 		tf.ctl |= ATA_SRST;
1342 	else
1343 		tf.ctl &= ~ATA_SRST;
1344 	tf.command = ATA_CMD_DEV_RESET;
1345 	ata_tf_to_fis(&tf, pmp, 0, fis);
1346 }
1347 
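/*
 * Soft reset an ATA disk: send a device reset FIS with SRST set, then
 * clear SRST, on every link of the ATA port, and release any tasks
 * still queued for the device on success.
 */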
1348 static int hisi_sas_softreset_ata_disk(struct domain_device *device)
1349 {
1350 	u8 fis[20] = {0};
1351 	struct ata_port *ap = device->sata_dev.ap;
1352 	struct ata_link *link;
1353 	int rc = TMF_RESP_FUNC_FAILED;
1354 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1355 	struct device *dev = hisi_hba->dev;
1356 
1357 	ata_for_each_link(link, ap, EDGE) {
1358 		int pmp = sata_srst_pmp(link);
1359 
1360 		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
1361 		rc = sas_execute_ata_cmd(device, fis, -1);
1362 		if (rc != TMF_RESP_FUNC_COMPLETE)
1363 			break;
1364 	}
1365 
1366 	if (rc == TMF_RESP_FUNC_COMPLETE) {
1367 		usleep_range(900, 1000);
1368 		ata_for_each_link(link, ap, EDGE) {
1369 			int pmp = sata_srst_pmp(link);
1370 
1371 			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
1372 			rc = sas_execute_ata_cmd(device, fis, -1);
1373 			if (rc != TMF_RESP_FUNC_COMPLETE)
1374 				dev_err(dev, "ata disk %016llx de-reset failed\n",
1375 					SAS_ADDR(device->sas_addr));
1376 		}
1377 	} else {
1378 		dev_err(dev, "ata disk %016llx reset failed\n",
1379 			SAS_ADDR(device->sas_addr));
1380 	}
1381 
1382 	if (rc == TMF_RESP_FUNC_COMPLETE)
1383 		hisi_sas_release_task(hisi_hba, device);
1384 
1385 	return rc;
1386 }
1387 
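/*
 * After a controller reset the hw port ids may have changed: rewrite
 * each attached port's id (and its device's ITCT) from the first phy
 * of the port that is still up, and mark detached ports as 0xff.
 */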
1388 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
1389 {
1390 	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
1391 	int i;
1392 
1393 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1394 		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1395 		struct domain_device *device = sas_dev->sas_device;
1396 		struct asd_sas_port *sas_port;
1397 		struct hisi_sas_port *port;
1398 		struct hisi_sas_phy *phy = NULL;
1399 		struct asd_sas_phy *sas_phy;
1400 
1401 		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
1402 				|| !device || !device->port)
1403 			continue;
1404 
1405 		sas_port = device->port;
1406 		port = to_hisi_sas_port(sas_port);
1407 
1408 		spin_lock(&sas_port->phy_list_lock);
1409 		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
1410 			if (state & BIT(sas_phy->id)) {
1411 				phy = sas_phy->lldd_phy;
1412 				break;
1413 			}
1414 		spin_unlock(&sas_port->phy_list_lock);
1415 
1416 		if (phy) {
1417 			port->id = phy->port_id;
1418 
1419 			/* Update linkrate of directly attached device. */
1420 			if (!device->parent)
1421 				device->linkrate = phy->sas_phy.linkrate;
1422 
1423 			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
1424 		} else if (!port->port_attached)
1425 			port->id = 0xff;
1426 	}
1427 }
1428 
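/*
 * Compare the phy state from before the reset with the current state:
 * phys still up behind an expander get a broadcast event so libsas
 * revalidates the domain; phys that went down are reported as such
 * and, if they were up before, given a link reset to try to recover.
 */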
1429 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
1430 {
1431 	u32 new_state = hisi_hba->hw->get_phys_state(hisi_hba);
1432 	struct asd_sas_port *_sas_port = NULL;
1433 	int phy_no;
1434 
1435 	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
1436 		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1437 		struct asd_sas_phy *sas_phy = &phy->sas_phy;
1438 		struct asd_sas_port *sas_port = sas_phy->port;
1439 		bool do_port_check = _sas_port != sas_port;
1440 
1441 		if (!sas_phy->phy->enabled)
1442 			continue;
1443 
1444 		/* Report PHY state change to libsas */
1445 		if (new_state & BIT(phy_no)) {
1446 			if (do_port_check && sas_port && sas_port->port_dev) {
1447 				struct domain_device *dev = sas_port->port_dev;
1448 
1449 				_sas_port = sas_port;
1450 
1451 				if (dev_is_expander(dev->dev_type))
1452 					sas_notify_port_event(sas_phy,
1453 							PORTE_BROADCAST_RCVD,
1454 							GFP_KERNEL);
1455 			}
1456 		} else {
1457 			hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL);
1458 
1459 			/*
1460 			 * The phy was up before the reset but is down now;
1461 			 * two possible causes:
1462 			 * 1. The connected device was removed.
1463 			 * 2. The device exists but phyup timed out.
1464 			 */
1465 			if (state & BIT(phy_no))
1466 				hisi_sas_notify_phy_event(phy,
1467 							  HISI_PHYE_LINK_RESET);
1468 		}
1469 	}
1470 }
1471 
1472 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
1473 {
1474 	struct hisi_sas_device *sas_dev;
1475 	struct domain_device *device;
1476 	int i;
1477 
1478 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1479 		sas_dev = &hisi_hba->devices[i];
1480 		device = sas_dev->sas_device;
1481 
1482 		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
1483 			continue;
1484 
1485 		hisi_sas_init_device(device);
1486 	}
1487 }
1488 
1489 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
1490 					     struct asd_sas_port *sas_port,
1491 					     struct domain_device *device)
1492 {
1493 	struct ata_port *ap = device->sata_dev.ap;
1494 	struct device *dev = hisi_hba->dev;
1495 	int rc = TMF_RESP_FUNC_FAILED;
1496 	struct ata_link *link;
1497 	u8 fis[20] = {0};
1498 	int i;
1499 
1500 	for (i = 0; i < hisi_hba->n_phy; i++) {
1501 		if (!(sas_port->phy_mask & BIT(i)))
1502 			continue;
1503 
1504 		ata_for_each_link(link, ap, EDGE) {
1505 			int pmp = sata_srst_pmp(link);
1506 
1507 			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
1508 			rc = sas_execute_ata_cmd(device, fis, i);
1509 			if (rc != TMF_RESP_FUNC_COMPLETE) {
1510 				dev_err(dev, "phy%d ata reset failed rc=%d\n",
1511 					i, rc);
1512 				break;
1513 			}
1514 		}
1515 	}
1516 }
1517 
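/*
 * Used when STP links are being rejected after reset: abort outstanding
 * I/O on every device, then issue an ATA soft reset on each phy of
 * every expander port that has a SATA device attached.
 */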
1518 static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
1519 {
1520 	struct device *dev = hisi_hba->dev;
1521 	int port_no, rc, i;
1522 
1523 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1524 		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1525 		struct domain_device *device = sas_dev->sas_device;
1526 
1527 		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
1528 			continue;
1529 
1530 		rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
1531 		if (rc < 0)
1532 			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
1533 	}
1534 
1535 	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
1536 		struct hisi_sas_port *port = &hisi_hba->port[port_no];
1537 		struct asd_sas_port *sas_port = &port->sas_port;
1538 		struct domain_device *port_dev = sas_port->port_dev;
1539 		struct domain_device *device;
1540 
1541 		if (!port_dev || !dev_is_expander(port_dev->dev_type))
1542 			continue;
1543 
1544 		/* Try to find a SATA device */
1545 		list_for_each_entry(device, &sas_port->dev_list,
1546 				    dev_list_node) {
1547 			if (dev_is_sata(device)) {
1548 				hisi_sas_send_ata_reset_each_phy(hisi_hba,
1549 								 sas_port,
1550 								 device);
1551 				break;
1552 			}
1553 		}
1554 	}
1555 }
1556 
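/*
 * Quiesce the host before a controller reset: block new requests, give
 * outstanding commands time to complete, and reject new commands until
 * the reset is done.
 */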
1557 void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
1558 {
1559 	struct Scsi_Host *shost = hisi_hba->shost;
1560 
1561 	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
1562 
1563 	scsi_block_requests(shost);
1564 	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
1565 
1566 	/*
1567 	 * hisi_hba->timer is only used for v1/v2 hw; hw->sht is likewise only
1568 	 * set for v1/v2 hw, so check it to skip the timer delete on v3 hw.
1569 	 */
1570 	if (hisi_hba->hw->sht)
1571 		timer_delete_sync(&hisi_hba->timer);
1572 
1573 	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1574 }
1575 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
1576 
1577 static void hisi_sas_async_init_wait_phyup(void *data, async_cookie_t cookie)
1578 {
1579 	struct hisi_sas_phy *phy = data;
1580 	struct hisi_hba *hisi_hba = phy->hisi_hba;
1581 	struct device *dev = hisi_hba->dev;
1582 	DECLARE_COMPLETION_ONSTACK(completion);
1583 	int phy_no = phy->sas_phy.id;
1584 
1585 	phy->reset_completion = &completion;
1586 	hisi_sas_phy_enable(hisi_hba, phy_no, 1);
1587 	if (!wait_for_completion_timeout(&completion,
1588 					 HISI_SAS_WAIT_PHYUP_TIMEOUT))
1589 		dev_warn(dev, "phy%d wait phyup timed out\n", phy_no);
1590 
1591 	phy->reset_completion = NULL;
1592 }
1593 
1594 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
1595 {
1596 	struct Scsi_Host *shost = hisi_hba->shost;
1597 	ASYNC_DOMAIN_EXCLUSIVE(async);
1598 	int phy_no;
1599 
1600 	/* Init and wait for PHYs to come up, and for all libsas events to finish. */
1601 	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
1602 		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1603 		struct asd_sas_phy *sas_phy = &phy->sas_phy;
1604 
1605 		if (!sas_phy->phy->enabled)
1606 			continue;
1607 
1608 		if (!(hisi_hba->phy_state & BIT(phy_no))) {
1609 			hisi_sas_phy_enable(hisi_hba, phy_no, 1);
1610 			continue;
1611 		}
1612 
1613 		async_schedule_domain(hisi_sas_async_init_wait_phyup,
1614 				      phy, &async);
1615 	}
1616 
1617 	async_synchronize_full_domain(&async);
1618 	hisi_sas_refresh_port_id(hisi_hba);
1619 	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1620 
1621 	if (hisi_hba->reject_stp_links_msk)
1622 		hisi_sas_terminate_stp_reject(hisi_hba);
1623 	hisi_sas_reset_init_all_devices(hisi_hba);
1624 	scsi_unblock_requests(shost);
1625 	clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
1626 	up(&hisi_hba->sem);
1627 
1628 	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
1629 }
1630 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
1631 
1632 static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
1633 {
1634 	if (!hisi_hba->hw->soft_reset)
1635 		return -ENOENT;
1636 
1637 	down(&hisi_hba->sem);
1638 	if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
1639 		up(&hisi_hba->sem);
1640 		return -EPERM;
1641 	}
1642 
1643 	if (hisi_sas_debugfs_enable)
1644 		hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
1645 
1646 	return 0;
1647 }
1648 
1649 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
1650 {
1651 	struct device *dev = hisi_hba->dev;
1652 	struct Scsi_Host *shost = hisi_hba->shost;
1653 	int rc;
1654 
1655 	dev_info(dev, "controller resetting...\n");
1656 	hisi_sas_controller_reset_prepare(hisi_hba);
1657 
1658 	rc = hisi_hba->hw->soft_reset(hisi_hba);
1659 	if (rc) {
1660 		dev_warn(dev, "controller reset failed (%d)\n", rc);
1661 		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
1662 		up(&hisi_hba->sem);
1663 		scsi_unblock_requests(shost);
1664 		clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
1665 		return rc;
1666 	}
1667 	clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
1668 
1669 	hisi_sas_controller_reset_done(hisi_hba);
1670 	dev_info(dev, "controller reset complete\n");
1671 
1672 	return 0;
1673 }
1674 
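/*
 * libsas .lldd_abort_task handler: for SSP issue an ABORT TASK TMF plus
 * an internal abort of the IPTT; for SATA/STP abort all I/O on the
 * device and soft reset the disk if needed; for SMP abort the single
 * IPTT.
 */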
1675 static int hisi_sas_abort_task(struct sas_task *task)
1676 {
1677 	struct hisi_sas_internal_abort_data internal_abort_data = { false };
1678 	struct domain_device *device = task->dev;
1679 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1680 	struct hisi_sas_slot *slot = task->lldd_task;
1681 	struct hisi_hba *hisi_hba;
1682 	struct device *dev;
1683 	int rc = TMF_RESP_FUNC_FAILED;
1684 	unsigned long flags;
1685 
1686 	if (!sas_dev)
1687 		return TMF_RESP_FUNC_FAILED;
1688 
1689 	hisi_hba = dev_to_hisi_hba(task->dev);
1690 	dev = hisi_hba->dev;
1691 
1692 	spin_lock_irqsave(&task->task_state_lock, flags);
1693 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1694 		struct hisi_sas_cq *cq;
1695 
1696 		if (slot) {
1697 			/*
1698 			 * sync irq or poll queue to avoid free'ing task
1699 			 * before using task in IO completion
1700 			 */
1701 			cq = &hisi_hba->cq[slot->dlvry_queue];
1702 			hisi_sas_sync_cq(cq);
1703 		}
1704 		spin_unlock_irqrestore(&task->task_state_lock, flags);
1705 		rc = TMF_RESP_FUNC_COMPLETE;
1706 		goto out;
1707 	}
1708 	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1709 	spin_unlock_irqrestore(&task->task_state_lock, flags);
1710 
1711 	if (!slot)
1712 		goto out;
1713 
1714 	if (task->task_proto & SAS_PROTOCOL_SSP) {
1715 		u16 tag = slot->idx;
1716 		int rc2;
1717 
1718 		rc = sas_abort_task(task, tag);
1719 		rc2 = sas_execute_internal_abort_single(device, tag,
1720 				slot->dlvry_queue, &internal_abort_data);
1721 		if (rc2 < 0) {
1722 			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
1723 			return TMF_RESP_FUNC_FAILED;
1724 		}
1725 
1726 		/*
1727 		 * If the TMF finds that the IO is no longer in the device
1728 		 * and the internal abort did not succeed, it is safe to
1729 		 * free the slot.
1730 		 * Note: if the internal abort succeeds, the slot will
1731 		 * already have been completed.
1732 		 */
1733 		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
1734 			if (task->lldd_task)
1735 				hisi_sas_do_release_task(hisi_hba, task, slot, true);
1736 		}
1737 	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
1738 		task->task_proto & SAS_PROTOCOL_STP) {
1739 		if (task->dev->dev_type == SAS_SATA_DEV) {
1740 			struct ata_queued_cmd *qc = task->uldd_task;
1741 
1742 			rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
1743 			if (rc < 0) {
1744 				dev_err(dev, "abort task: internal abort failed\n");
1745 				goto out;
1746 			}
1747 			hisi_sas_dereg_device(hisi_hba, device);
1748 
1749 			/*
1750 			 * An ATA internal command that times out in ATA EH
1751 			 * needs a soft reset, so check scsicmd to spot it.
1752 			 */
1753 			if ((sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) &&
1754 			    qc && qc->scsicmd) {
1755 				hisi_sas_do_release_task(hisi_hba, task, slot, true);
1756 				rc = TMF_RESP_FUNC_COMPLETE;
1757 			} else {
1758 				rc = hisi_sas_softreset_ata_disk(device);
1759 			}
1760 		}
1761 	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
1762 		/* SMP */
1763 		u32 tag = slot->idx;
1764 		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
1765 
1766 		rc = sas_execute_internal_abort_single(device,
1767 						       tag, slot->dlvry_queue,
1768 						       &internal_abort_data);
1769 		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
1770 					task->lldd_task) {
1771 			/*
1772 			 * Sync the irq or poll queue so that the task is not
1773 			 * freed while I/O completion is still using it.
1774 			 */
1775 			hisi_sas_sync_cq(cq);
1776 			slot->task = NULL;
1777 		}
1778 	}
1779 
1780 out:
1781 	if (rc != TMF_RESP_FUNC_COMPLETE)
1782 		dev_notice(dev, "abort task: rc=%d\n", rc);
1783 	return rc;
1784 }
1785 
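/*
 * libsas lldd_abort_task_set handler: internally abort all outstanding I/O
 * for the device, deregister it, then send the ABORT TASK SET TMF and
 * release the device's slots if the TMF completes.
 */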
1786 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
1787 {
1788 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1789 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1790 	struct device *dev = hisi_hba->dev;
1791 	int rc;
1792 
1793 	rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
1794 	if (rc < 0) {
1795 		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
1796 		return TMF_RESP_FUNC_FAILED;
1797 	}
1798 	hisi_sas_dereg_device(hisi_hba, device);
1799 
1800 	rc = sas_abort_task_set(device, lun);
1801 	if (rc == TMF_RESP_FUNC_COMPLETE)
1802 		hisi_sas_release_task(hisi_hba, device);
1803 
1804 	return rc;
1805 }
1806 
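/*
 * Reset the link to a device through its local or expander-attached phy.
 * A hard reset is used for devices still initialising and for non-SATA
 * devices, otherwise a link reset. For a local phy, in_reset is set around
 * the reset and a phy down event is reported on timeout; for a remote phy,
 * wait for the SATA link to become ready again (or 2s for SAS) afterwards.
 */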
1807 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1808 {
1809 	struct sas_phy *local_phy = sas_get_local_phy(device);
1810 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1811 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1812 	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1813 	int rc, reset_type;
1814 
1815 	if (!local_phy->enabled) {
1816 		sas_put_local_phy(local_phy);
1817 		return -ENODEV;
1818 	}
1819 
1820 	if (scsi_is_sas_phy_local(local_phy)) {
1821 		struct asd_sas_phy *sas_phy =
1822 			sas_ha->sas_phy[local_phy->number];
1823 		struct hisi_sas_phy *phy =
1824 			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
1825 		unsigned long flags;
1826 
1827 		spin_lock_irqsave(&phy->lock, flags);
1828 		phy->in_reset = 1;
1829 		spin_unlock_irqrestore(&phy->lock, flags);
1830 	}
1831 
1832 	reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
1833 		      !dev_is_sata(device)) ? true : false;
1834 
1835 	rc = sas_phy_reset(local_phy, reset_type);
1836 	sas_put_local_phy(local_phy);
1837 
1838 	if (scsi_is_sas_phy_local(local_phy)) {
1839 		struct asd_sas_phy *sas_phy =
1840 			sas_ha->sas_phy[local_phy->number];
1841 		struct hisi_sas_phy *phy =
1842 			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
1843 		unsigned long flags;
1844 
1845 		spin_lock_irqsave(&phy->lock, flags);
1846 		phy->in_reset = 0;
1847 		spin_unlock_irqrestore(&phy->lock, flags);
1848 
1849 		/* report PHY down if timed out */
1850 		if (rc == -ETIMEDOUT)
1851 			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
1852 		return rc;
1853 	}
1854 
1855 	/* Remote phy */
1856 	if (rc)
1857 		return rc;
1858 
1859 	if (dev_is_sata(device)) {
1860 		struct ata_link *link = &device->sata_dev.ap->link;
1861 
1862 		rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT,
1863 					  smp_ata_check_ready_type);
1864 	} else {
1865 		msleep(2000);
1866 	}
1867 
1868 	return rc;
1869 }
1870 
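/*
 * libsas lldd_I_T_nexus_reset handler: internally abort the device's I/O,
 * deregister it, then reset the nexus. For SATA devices a successful phy
 * reset is followed by an ATA disk softreset; if the softreset fails, the
 * local phy is disabled and the device is treated as gone (-ENODEV).
 * Completed or -ENODEV results release all of the device's tasks.
 */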
1871 static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1872 {
1873 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1874 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1875 	struct device *dev = hisi_hba->dev;
1876 	int rc;
1877 
1878 	if (sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR)
1879 		sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
1880 
1881 	rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
1882 	if (rc < 0) {
1883 		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
1884 		return TMF_RESP_FUNC_FAILED;
1885 	}
1886 	hisi_sas_dereg_device(hisi_hba, device);
1887 
1888 	rc = hisi_sas_debug_I_T_nexus_reset(device);
1889 	if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) {
1890 		struct sas_phy *local_phy;
1891 
1892 		rc = hisi_sas_softreset_ata_disk(device);
1893 		switch (rc) {
1894 		case -ECOMM:
1895 			rc = -ENODEV;
1896 			break;
1897 		case TMF_RESP_FUNC_FAILED:
1898 		case -EMSGSIZE:
1899 		case -EIO:
1900 			local_phy = sas_get_local_phy(device);
1901 			rc = sas_phy_enable(local_phy, 0);
1902 			if (!rc) {
1903 				local_phy->enabled = 0;
1904 				dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n",
1905 					SAS_ADDR(device->sas_addr), rc);
1906 				rc = -ENODEV;
1907 			}
1908 			sas_put_local_phy(local_phy);
1909 			break;
1910 		default:
1911 			break;
1912 		}
1913 	}
1914 
1915 	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
1916 		hisi_sas_release_task(hisi_hba, device);
1917 
1918 	return rc;
1919 }
1920 
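/*
 * libsas lldd_lu_reset handler: clear the device's outstanding I/O with an
 * internal abort, deregister it, then reset the LU (a hard phy reset for
 * SATA devices, a LOGICAL UNIT RESET TMF otherwise) and release the
 * device's tasks on success.
 */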
1921 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
1922 {
1923 	struct hisi_sas_device *sas_dev = device->lldd_dev;
1924 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1925 	struct device *dev = hisi_hba->dev;
1926 	int rc = TMF_RESP_FUNC_FAILED;
1927 
1928 	/* Clear internal IO, then issue the LU reset */
1929 	rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
1930 	if (rc < 0) {
1931 		dev_err(dev, "lu_reset: internal abort failed\n");
1932 		goto out;
1933 	}
1934 	hisi_sas_dereg_device(hisi_hba, device);
1935 
1936 	if (dev_is_sata(device)) {
1937 		struct sas_phy *phy;
1938 
1939 		phy = sas_get_local_phy(device);
1940 
1941 		rc = sas_phy_reset(phy, true);
1942 
1943 		if (rc == 0)
1944 			hisi_sas_release_task(hisi_hba, device);
1945 		sas_put_local_phy(phy);
1946 	} else {
1947 		rc = sas_lu_reset(device, lun);
1948 		if (rc == TMF_RESP_FUNC_COMPLETE)
1949 			hisi_sas_release_task(hisi_hba, device);
1950 	}
1951 out:
1952 	if (rc != TMF_RESP_FUNC_COMPLETE)
1953 		dev_err(dev, "lu_reset: for device[%d]: rc=%d\n",
1954 			     sas_dev->device_id, rc);
1955 	return rc;
1956 }
1957 
1958 static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie)
1959 {
1960 	struct domain_device *device = data;
1961 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1962 	int rc;
1963 
1964 	rc = hisi_sas_debug_I_T_nexus_reset(device);
1965 	if (rc != TMF_RESP_FUNC_COMPLETE)
1966 		dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n",
1967 			 SAS_ADDR(device->sas_addr), rc);
1968 }
1969 
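/*
 * libsas lldd_clear_nexus_ha handler: synchronously reset the whole
 * controller, then issue an I_T nexus reset to every registered
 * non-expander device in parallel and release all remaining tasks.
 */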
1970 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
1971 {
1972 	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1973 	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
1974 	ASYNC_DOMAIN_EXCLUSIVE(async);
1975 	int i;
1976 
1977 	queue_work(hisi_hba->wq, &r.work);
1978 	wait_for_completion(r.completion);
1979 	if (!r.done)
1980 		return TMF_RESP_FUNC_FAILED;
1981 
1982 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
1983 		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
1984 		struct domain_device *device = sas_dev->sas_device;
1985 
1986 		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
1987 		    dev_is_expander(device->dev_type))
1988 			continue;
1989 
1990 		async_schedule_domain(hisi_sas_async_I_T_nexus_reset,
1991 				      device, &async);
1992 	}
1993 
1994 	async_synchronize_full_domain(&async);
1995 	hisi_sas_release_tasks(hisi_hba);
1996 
1997 	return TMF_RESP_FUNC_COMPLETE;
1998 }
1999 
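/*
 * libsas lldd_query_task handler: for SSP tasks that still own a slot, send
 * a QUERY TASK TMF and map any unexpected status to TMF_RESP_FUNC_FAILED.
 */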
2000 static int hisi_sas_query_task(struct sas_task *task)
2001 {
2002 	int rc = TMF_RESP_FUNC_FAILED;
2003 
2004 	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
2005 		struct hisi_sas_slot *slot = task->lldd_task;
2006 		u32 tag = slot->idx;
2007 
2008 		rc = sas_query_task(task, tag);
2009 		switch (rc) {
2010 		/* The task is still in the LUN, so release it */
2011 		case TMF_RESP_FUNC_SUCC:
2012 		/* The task is not in the LUN or the query failed; reset the phy */
2013 		case TMF_RESP_FUNC_FAILED:
2014 		case TMF_RESP_FUNC_COMPLETE:
2015 			break;
2016 		default:
2017 			rc = TMF_RESP_FUNC_FAILED;
2018 			break;
2019 		}
2020 	}
2021 	return rc;
2022 }
2023 
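/*
 * Called by libsas when an internal abort times out. Snapshot the registers
 * to debugfs, and if the aborted task never completed, flag a HW fault,
 * detach the slot from the task and optionally queue a full controller
 * reset; return true in that case so the caller knows the abort timed out
 * without completion.
 */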
2024 static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
2025 					    void *data)
2026 {
2027 	struct domain_device *device = task->dev;
2028 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
2029 	struct hisi_sas_internal_abort_data *timeout = data;
2030 
2031 	if (hisi_sas_debugfs_enable) {
2032 		/*
2033 		 * If the timeout occurs in the device-gone path, skip taking
2034 		 * the semaphore again to avoid a circular dependency like:
2035 		 * hisi_sas_dev_gone() -> down() -> ... ->
2036 		 * hisi_sas_internal_abort_timeout() -> down().
2037 		 */
2038 		if (!timeout->rst_ha_timeout)
2039 			down(&hisi_hba->sem);
2040 		hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
2041 		if (!timeout->rst_ha_timeout)
2042 			up(&hisi_hba->sem);
2043 	}
2044 
2045 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
2046 		pr_err("Internal abort: timeout %016llx\n",
2047 		       SAS_ADDR(device->sas_addr));
2048 	} else {
2049 		struct hisi_sas_slot *slot = task->lldd_task;
2050 
2051 		set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
2052 
2053 		if (slot) {
2054 			struct hisi_sas_cq *cq =
2055 				&hisi_hba->cq[slot->dlvry_queue];
2056 			/*
2057 			 * Sync the irq or poll queue so that the task is not
2058 			 * freed while I/O completion is still using it.
2059 			 */
2060 			hisi_sas_sync_cq(cq);
2061 			slot->task = NULL;
2062 		}
2063 
2064 		if (timeout->rst_ha_timeout) {
2065 			pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n",
2066 			       SAS_ADDR(device->sas_addr));
2067 			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
2068 		} else {
2069 			pr_err("Internal abort: timeout and not done %016llx.\n",
2070 			       SAS_ADDR(device->sas_addr));
2071 		}
2072 
2073 		return true;
2074 	}
2075 
2076 	return false;
2077 }
2078 
2079 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
2080 {
2081 	hisi_sas_port_notify_formed(sas_phy);
2082 }
2083 
2084 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
2085 			u8 reg_index, u8 reg_count, u8 *write_data)
2086 {
2087 	struct hisi_hba *hisi_hba = sha->lldd_ha;
2088 
2089 	if (!hisi_hba->hw->write_gpio)
2090 		return -EOPNOTSUPP;
2091 
2092 	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
2093 				reg_index, reg_count, write_data);
2094 }
2095 
2096 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
2097 {
2098 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
2099 	struct sas_phy *sphy = sas_phy->phy;
2100 	unsigned long flags;
2101 
2102 	phy->phy_attached = 0;
2103 	phy->phy_type = 0;
2104 	phy->port = NULL;
2105 
2106 	spin_lock_irqsave(&phy->lock, flags);
2107 	if (phy->enable)
2108 		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
2109 	else
2110 		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
2111 	spin_unlock_irqrestore(&phy->lock, flags);
2112 }
2113 
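/*
 * Handle a phy down event. If the phy is down but still ready (rdy), re-run
 * discovery and port formation; otherwise notify libsas of loss of signal,
 * detach the phy and clear the port's attached state when its last SAS phy
 * (or a SATA phy) goes down. Flutter events seen during a controller or phy
 * reset are ignored.
 */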
2114 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
2115 		       gfp_t gfp_flags)
2116 {
2117 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
2118 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
2119 	struct device *dev = hisi_hba->dev;
2120 
2121 	if (rdy) {
2122 		/* Phy down but ready */
2123 		hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags);
2124 		hisi_sas_port_notify_formed(sas_phy);
2125 	} else {
2126 		struct hisi_sas_port *port  = phy->port;
2127 
2128 		if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
2129 		    phy->in_reset) {
2130 			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
2131 			return;
2132 		}
2133 		/* Phy down and not ready */
2134 		sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags);
2135 		sas_phy_disconnected(sas_phy);
2136 
2137 		if (port) {
2138 			if (phy->phy_type & PORT_TYPE_SAS) {
2139 				int port_id = port->id;
2140 
2141 				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
2142 								       port_id))
2143 					port->port_attached = 0;
2144 			} else if (phy->phy_type & PORT_TYPE_SATA)
2145 				port->port_attached = 0;
2146 		}
2147 		hisi_sas_phy_disconnected(phy);
2148 	}
2149 }
2150 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
2151 
2152 void hisi_sas_phy_bcast(struct hisi_sas_phy *phy)
2153 {
2154 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
2155 	struct hisi_hba	*hisi_hba = phy->hisi_hba;
2156 
2157 	if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
2158 		return;
2159 
2160 	sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC);
2161 }
2162 EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast);
2163 
2164 int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
2165 {
2166 	struct hisi_hba *hisi_hba = shost_priv(shost);
2167 
2168 	if (reset_type != SCSI_ADAPTER_RESET)
2169 		return -EOPNOTSUPP;
2170 
2171 	queue_work(hisi_hba->wq, &hisi_hba->rst_work);
2172 
2173 	return 0;
2174 }
2175 EXPORT_SYMBOL_GPL(hisi_sas_host_reset);
2176 
2177 struct scsi_transport_template *hisi_sas_stt;
2178 EXPORT_SYMBOL_GPL(hisi_sas_stt);
2179 
2180 static struct sas_domain_function_template hisi_sas_transport_ops = {
2181 	.lldd_dev_found		= hisi_sas_dev_found,
2182 	.lldd_dev_gone		= hisi_sas_dev_gone,
2183 	.lldd_execute_task	= hisi_sas_queue_command,
2184 	.lldd_control_phy	= hisi_sas_control_phy,
2185 	.lldd_abort_task	= hisi_sas_abort_task,
2186 	.lldd_abort_task_set	= hisi_sas_abort_task_set,
2187 	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
2188 	.lldd_lu_reset		= hisi_sas_lu_reset,
2189 	.lldd_query_task	= hisi_sas_query_task,
2190 	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
2191 	.lldd_port_formed	= hisi_sas_port_formed,
2192 	.lldd_write_gpio	= hisi_sas_write_gpio,
2193 	.lldd_tmf_aborted	= hisi_sas_tmf_aborted,
2194 	.lldd_abort_timeout	= hisi_sas_internal_abort_timeout,
2195 };
2196 
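/*
 * Zero all memory that the HW consumes (command and completion queues,
 * IOST, breakpoint and initial FIS tables) and reset the queue read/write
 * pointers so the controller starts from a clean state.
 */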
2197 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
2198 {
2199 	int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
2200 	struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;
2201 
2202 	for (i = 0; i < hisi_hba->queue_count; i++) {
2203 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2204 		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
2205 		struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];
2206 
2207 		s = sizeof(struct hisi_sas_cmd_hdr);
2208 		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
2209 			memset(&cmd_hdr[j], 0, s);
2210 
2211 		dq->wr_point = 0;
2212 
2213 		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
2214 		memset(hisi_hba->complete_hdr[i], 0, s);
2215 		cq->rd_point = 0;
2216 	}
2217 
2218 	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
2219 	memset(hisi_hba->initial_fis, 0, s);
2220 
2221 	s = max_command_entries * sizeof(struct hisi_sas_iost);
2222 	memset(hisi_hba->iost, 0, s);
2223 
2224 	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
2225 	memset(hisi_hba->breakpoint, 0, s);
2226 
2227 	s = sizeof(struct hisi_sas_sata_breakpoint);
2228 	for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
2229 		memset(&sata_breakpoint[j], 0, s);
2230 }
2231 EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
2232 
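/*
 * Allocate and initialise the per-HBA software and DMA structures: phy and
 * device state, delivery/completion queues, ITCT, the slot table (carved
 * out of large coherent blocks), IOST, breakpoints, the IPTT bitmap and the
 * ordered reset workqueue. All DMA memory is device-managed, so only the
 * workqueue needs explicit teardown in hisi_sas_free().
 */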
2233 int hisi_sas_alloc(struct hisi_hba *hisi_hba)
2234 {
2235 	struct device *dev = hisi_hba->dev;
2236 	int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
2237 	int max_command_entries_ru, sz_slot_buf_ru;
2238 	int blk_cnt, slots_per_blk;
2239 
2240 	sema_init(&hisi_hba->sem, 1);
2241 	spin_lock_init(&hisi_hba->lock);
2242 	for (i = 0; i < hisi_hba->n_phy; i++) {
2243 		hisi_sas_phy_init(hisi_hba, i);
2244 		hisi_hba->port[i].port_attached = 0;
2245 		hisi_hba->port[i].id = -1;
2246 	}
2247 
2248 	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
2249 		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
2250 		hisi_hba->devices[i].device_id = i;
2251 		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
2252 	}
2253 
2254 	for (i = 0; i < hisi_hba->queue_count; i++) {
2255 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2256 		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
2257 
2258 		/* Completion queue structure */
2259 		cq->id = i;
2260 		cq->hisi_hba = hisi_hba;
2261 		spin_lock_init(&cq->poll_lock);
2262 
2263 		/* Delivery queue structure */
2264 		spin_lock_init(&dq->lock);
2265 		INIT_LIST_HEAD(&dq->list);
2266 		dq->id = i;
2267 		dq->hisi_hba = hisi_hba;
2268 
2269 		/* Delivery queue */
2270 		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
2271 		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
2272 						&hisi_hba->cmd_hdr_dma[i],
2273 						GFP_KERNEL);
2274 		if (!hisi_hba->cmd_hdr[i])
2275 			goto err_out;
2276 
2277 		/* Completion queue */
2278 		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
2279 		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
2280 						&hisi_hba->complete_hdr_dma[i],
2281 						GFP_KERNEL);
2282 		if (!hisi_hba->complete_hdr[i])
2283 			goto err_out;
2284 	}
2285 
2286 	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
2287 	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
2288 					     GFP_KERNEL);
2289 	if (!hisi_hba->itct)
2290 		goto err_out;
2291 
2292 	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
2293 					   sizeof(struct hisi_sas_slot),
2294 					   GFP_KERNEL);
2295 	if (!hisi_hba->slot_info)
2296 		goto err_out;
2297 
2298 	/* Round up to avoid an overly large block size */
2299 	max_command_entries_ru = roundup(max_command_entries, 64);
2300 	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
2301 		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
2302 	else
2303 		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
2304 	sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
2305 	s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
2306 	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
2307 	slots_per_blk = s / sz_slot_buf_ru;
2308 
2309 	for (i = 0; i < blk_cnt; i++) {
2310 		int slot_index = i * slots_per_blk;
2311 		dma_addr_t buf_dma;
2312 		void *buf;
2313 
2314 		buf = dmam_alloc_coherent(dev, s, &buf_dma,
2315 					  GFP_KERNEL);
2316 		if (!buf)
2317 			goto err_out;
2318 
2319 		for (j = 0; j < slots_per_blk; j++, slot_index++) {
2320 			struct hisi_sas_slot *slot;
2321 
2322 			slot = &hisi_hba->slot_info[slot_index];
2323 			slot->buf = buf;
2324 			slot->buf_dma = buf_dma;
2325 			slot->idx = slot_index;
2326 
2327 			buf += sz_slot_buf_ru;
2328 			buf_dma += sz_slot_buf_ru;
2329 		}
2330 	}
2331 
2332 	s = max_command_entries * sizeof(struct hisi_sas_iost);
2333 	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
2334 					     GFP_KERNEL);
2335 	if (!hisi_hba->iost)
2336 		goto err_out;
2337 
2338 	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
2339 	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
2340 						   &hisi_hba->breakpoint_dma,
2341 						   GFP_KERNEL);
2342 	if (!hisi_hba->breakpoint)
2343 		goto err_out;
2344 
2345 	s = hisi_hba->slot_index_count = max_command_entries;
2346 	hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL);
2347 	if (!hisi_hba->slot_index_tags)
2348 		goto err_out;
2349 
2350 	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
2351 	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
2352 						    &hisi_hba->initial_fis_dma,
2353 						    GFP_KERNEL);
2354 	if (!hisi_hba->initial_fis)
2355 		goto err_out;
2356 
2357 	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
2358 	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
2359 					&hisi_hba->sata_breakpoint_dma,
2360 					GFP_KERNEL);
2361 	if (!hisi_hba->sata_breakpoint)
2362 		goto err_out;
2363 
2364 	hisi_hba->last_slot_index = 0;
2365 
2366 	hisi_hba->wq =
2367 		alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, dev_name(dev));
2368 	if (!hisi_hba->wq) {
2369 		dev_err(dev, "sas_alloc: failed to create workqueue\n");
2370 		goto err_out;
2371 	}
2372 
2373 	return 0;
2374 err_out:
2375 	return -ENOMEM;
2376 }
2377 EXPORT_SYMBOL_GPL(hisi_sas_alloc);
2378 
2379 void hisi_sas_free(struct hisi_hba *hisi_hba)
2380 {
2381 	int i;
2382 
2383 	for (i = 0; i < hisi_hba->n_phy; i++) {
2384 		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
2385 
2386 		timer_delete_sync(&phy->timer);
2387 	}
2388 
2389 	if (hisi_hba->wq)
2390 		destroy_workqueue(hisi_hba->wq);
2391 }
2392 EXPORT_SYMBOL_GPL(hisi_sas_free);
2393 
2394 void hisi_sas_rst_work_handler(struct work_struct *work)
2395 {
2396 	struct hisi_hba *hisi_hba =
2397 		container_of(work, struct hisi_hba, rst_work);
2398 
2399 	if (hisi_sas_controller_prereset(hisi_hba))
2400 		return;
2401 
2402 	hisi_sas_controller_reset(hisi_hba);
2403 }
2404 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
2405 
2406 void hisi_sas_sync_rst_work_handler(struct work_struct *work)
2407 {
2408 	struct hisi_sas_rst *rst =
2409 		container_of(work, struct hisi_sas_rst, work);
2410 
2411 	if (hisi_sas_controller_prereset(rst->hisi_hba))
2412 		goto rst_complete;
2413 
2414 	if (!hisi_sas_controller_reset(rst->hisi_hba))
2415 		rst->done = true;
2416 rst_complete:
2417 	complete(rst->completion);
2418 }
2419 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
2420 
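/*
 * Read controller properties from firmware (DT or ACPI): the SAS address,
 * the reset/clock syscon registers for DT-based platform devices, an
 * optional reference clock, and the phy and queue counts. A missing
 * mandatory property fails with -ENOENT.
 */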
2421 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
2422 {
2423 	struct device *dev = hisi_hba->dev;
2424 	struct platform_device *pdev = hisi_hba->platform_dev;
2425 	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
2426 	struct clk *refclk;
2427 
2428 	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
2429 					  SAS_ADDR_SIZE)) {
2430 		dev_err(dev, "could not get property sas-addr\n");
2431 		return -ENOENT;
2432 	}
2433 
2434 	if (np) {
2435 		/*
2436 		 * These properties are only required for a platform
2437 		 * device-based controller with DT firmware.
2438 		 */
2439 		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
2440 					"hisilicon,sas-syscon");
2441 		if (IS_ERR(hisi_hba->ctrl)) {
2442 			dev_err(dev, "could not get syscon\n");
2443 			return -ENOENT;
2444 		}
2445 
2446 		if (device_property_read_u32(dev, "ctrl-reset-reg",
2447 					     &hisi_hba->ctrl_reset_reg)) {
2448 			dev_err(dev, "could not get property ctrl-reset-reg\n");
2449 			return -ENOENT;
2450 		}
2451 
2452 		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
2453 					     &hisi_hba->ctrl_reset_sts_reg)) {
2454 			dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
2455 			return -ENOENT;
2456 		}
2457 
2458 		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
2459 					     &hisi_hba->ctrl_clock_ena_reg)) {
2460 			dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
2461 			return -ENOENT;
2462 		}
2463 	}
2464 
2465 	refclk = devm_clk_get(dev, NULL);
2466 	if (IS_ERR(refclk))
2467 		dev_dbg(dev, "no ref clk property\n");
2468 	else
2469 		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
2470 
2471 	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
2472 		dev_err(dev, "could not get property phy-count\n");
2473 		return -ENOENT;
2474 	}
2475 
2476 	if (device_property_read_u32(dev, "queue-count",
2477 				     &hisi_hba->queue_count)) {
2478 		dev_err(dev, "could not get property queue-count\n");
2479 		return -ENOENT;
2480 	}
2481 
2482 	return 0;
2483 }
2484 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
2485 
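/*
 * Allocate the Scsi_Host and embedded hisi_hba for a platform device: read
 * the firmware properties, set up 64-bit DMA, map the register (and
 * optional SGPIO) resources and allocate the internal structures. Returns
 * NULL on any failure after dropping the host reference.
 */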
2486 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
2487 					      const struct hisi_sas_hw *hw)
2488 {
2489 	struct resource *res;
2490 	struct Scsi_Host *shost;
2491 	struct hisi_hba *hisi_hba;
2492 	struct device *dev = &pdev->dev;
2493 	int error;
2494 
2495 	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
2496 	if (!shost) {
2497 		dev_err(dev, "scsi host alloc failed\n");
2498 		return NULL;
2499 	}
2500 	hisi_hba = shost_priv(shost);
2501 
2502 	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
2503 	hisi_hba->hw = hw;
2504 	hisi_hba->dev = dev;
2505 	hisi_hba->platform_dev = pdev;
2506 	hisi_hba->shost = shost;
2507 	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
2508 
2509 	timer_setup(&hisi_hba->timer, NULL, 0);
2510 
2511 	if (hisi_sas_get_fw_info(hisi_hba) < 0)
2512 		goto err_out;
2513 
2514 	if (hisi_hba->hw->fw_info_check) {
2515 		if (hisi_hba->hw->fw_info_check(hisi_hba))
2516 			goto err_out;
2517 	}
2518 
2519 	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
2520 	if (error) {
2521 		dev_err(dev, "No usable DMA addressing method\n");
2522 		goto err_out;
2523 	}
2524 
2525 	hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
2526 	if (IS_ERR(hisi_hba->regs))
2527 		goto err_out;
2528 
2529 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2530 	if (res) {
2531 		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
2532 		if (IS_ERR(hisi_hba->sgpio_regs))
2533 			goto err_out;
2534 	}
2535 
2536 	if (hisi_sas_alloc(hisi_hba)) {
2537 		hisi_sas_free(hisi_hba);
2538 		goto err_out;
2539 	}
2540 
2541 	return shost;
2542 err_out:
2543 	scsi_host_put(shost);
2544 	dev_err(dev, "shost alloc failed\n");
2545 	return NULL;
2546 }
2547 
2548 static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
2549 {
2550 	if (hisi_hba->hw->interrupt_preinit)
2551 		return hisi_hba->hw->interrupt_preinit(hisi_hba);
2552 	return 0;
2553 }
2554 
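/*
 * Common probe path for the platform device based HW versions: allocate the
 * host, wire up the libsas sas_ha_struct and phy/port arrays, register the
 * SCSI host and SAS HA, run the HW-specific init and scan the host,
 * unwinding in reverse order on failure.
 */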
2555 int hisi_sas_probe(struct platform_device *pdev,
2556 		   const struct hisi_sas_hw *hw)
2557 {
2558 	struct Scsi_Host *shost;
2559 	struct hisi_hba *hisi_hba;
2560 	struct device *dev = &pdev->dev;
2561 	struct asd_sas_phy **arr_phy;
2562 	struct asd_sas_port **arr_port;
2563 	struct sas_ha_struct *sha;
2564 	int rc, phy_nr, port_nr, i;
2565 
2566 	shost = hisi_sas_shost_alloc(pdev, hw);
2567 	if (!shost)
2568 		return -ENOMEM;
2569 
2570 	sha = SHOST_TO_SAS_HA(shost);
2571 	hisi_hba = shost_priv(shost);
2572 	platform_set_drvdata(pdev, sha);
2573 
2574 	phy_nr = port_nr = hisi_hba->n_phy;
2575 
2576 	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
2577 	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
2578 	if (!arr_phy || !arr_port) {
2579 		rc = -ENOMEM;
2580 		goto err_out_ha;
2581 	}
2582 
2583 	sha->sas_phy = arr_phy;
2584 	sha->sas_port = arr_port;
2585 	sha->lldd_ha = hisi_hba;
2586 
2587 	shost->transportt = hisi_sas_stt;
2588 	shost->max_id = HISI_SAS_MAX_DEVICES;
2589 	shost->max_lun = ~0;
2590 	shost->max_channel = 1;
2591 	shost->max_cmd_len = 16;
2592 	if (hisi_hba->hw->slot_index_alloc) {
2593 		shost->can_queue = HISI_SAS_MAX_COMMANDS;
2594 		shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
2595 	} else {
2596 		shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
2597 		shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
2598 	}
2599 
2600 	sha->sas_ha_name = DRV_NAME;
2601 	sha->dev = hisi_hba->dev;
2602 	sha->sas_addr = &hisi_hba->sas_addr[0];
2603 	sha->num_phys = hisi_hba->n_phy;
2604 	sha->shost = hisi_hba->shost;
2605 
2606 	for (i = 0; i < hisi_hba->n_phy; i++) {
2607 		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
2608 		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2609 	}
2610 
2611 	rc = hisi_sas_interrupt_preinit(hisi_hba);
2612 	if (rc)
2613 		goto err_out_ha;
2614 
2615 	rc = scsi_add_host(shost, &pdev->dev);
2616 	if (rc)
2617 		goto err_out_ha;
2618 
2619 	rc = sas_register_ha(sha);
2620 	if (rc)
2621 		goto err_out_register_ha;
2622 
2623 	rc = hisi_hba->hw->hw_init(hisi_hba);
2624 	if (rc)
2625 		goto err_out_hw_init;
2626 
2627 	scsi_scan_host(shost);
2628 
2629 	return 0;
2630 
2631 err_out_hw_init:
2632 	sas_unregister_ha(sha);
2633 err_out_register_ha:
2634 	scsi_remove_host(shost);
2635 err_out_ha:
2636 	hisi_sas_free(hisi_hba);
2637 	scsi_host_put(shost);
2638 	return rc;
2639 }
2640 EXPORT_SYMBOL_GPL(hisi_sas_probe);
2641 
2642 void hisi_sas_remove(struct platform_device *pdev)
2643 {
2644 	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
2645 	struct hisi_hba *hisi_hba = sha->lldd_ha;
2646 	struct Scsi_Host *shost = sha->shost;
2647 
2648 	timer_delete_sync(&hisi_hba->timer);
2649 
2650 	sas_unregister_ha(sha);
2651 	sas_remove_host(shost);
2652 
2653 	hisi_sas_free(hisi_hba);
2654 	scsi_host_put(shost);
2655 }
2656 EXPORT_SYMBOL_GPL(hisi_sas_remove);
2657 
2658 #if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE)
2659 #define DEBUGFS_ENABLE_DEFAULT  "enabled"
2660 bool hisi_sas_debugfs_enable = true;
2661 u32 hisi_sas_debugfs_dump_count = 50;
2662 #else
2663 #define DEBUGFS_ENABLE_DEFAULT "disabled"
2664 bool hisi_sas_debugfs_enable;
2665 u32 hisi_sas_debugfs_dump_count = 1;
2666 #endif
2667 
2668 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
2669 module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
2670 MODULE_PARM_DESC(hisi_sas_debugfs_enable,
2671 		 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")");
2672 
2673 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
2674 module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
2675 MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");
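/*
 * Example usage, assuming the core is built as a module named
 * hisi_sas_main (the values below are illustrative only):
 *   modprobe hisi_sas_main debugfs_enable=1 debugfs_dump_count=8
 * Both parameters are read-only (0444) once the module has loaded.
 */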
2676 
2677 struct dentry *hisi_sas_debugfs_dir;
2678 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir);
2679 
2680 static __init int hisi_sas_init(void)
2681 {
2682 	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
2683 	if (!hisi_sas_stt)
2684 		return -ENOMEM;
2685 
2686 	if (hisi_sas_debugfs_enable) {
2687 		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
2688 		if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
2689 			pr_info("hisi_sas: Limiting debugfs dump count\n");
2690 			hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
2691 		}
2692 	}
2693 
2694 	return 0;
2695 }
2696 
2697 static __exit void hisi_sas_exit(void)
2698 {
2699 	if (hisi_sas_debugfs_enable)
2700 		debugfs_remove(hisi_sas_debugfs_dir);
2701 
2702 	sas_release_transport(hisi_sas_stt);
2703 }
2704 
2705 module_init(hisi_sas_init);
2706 module_exit(hisi_sas_exit);
2707 
2708 MODULE_LICENSE("GPL");
2709 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
2710 MODULE_DESCRIPTION("HISILICON SAS controller driver");
2711 MODULE_ALIAS("platform:" DRV_NAME);
2712