1 /*
2 *******************************************************************************
3 ** O.S : Linux
4 ** FILE NAME : arcmsr_hba.c
5 ** BY : Nick Cheng, C.L. Huang
6 ** Description: SCSI RAID Device Driver for Areca RAID Controller
7 *******************************************************************************
8 ** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved
9 **
10 ** Web site: www.areca.com.tw
11 ** E-mail: support@areca.com.tw
12 **
13 ** This program is free software; you can redistribute it and/or modify
14 ** it under the terms of the GNU General Public License version 2 as
15 ** published by the Free Software Foundation.
16 ** This program is distributed in the hope that it will be useful,
17 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
18 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 ** GNU General Public License for more details.
20 *******************************************************************************
21 ** Redistribution and use in source and binary forms, with or without
22 ** modification, are permitted provided that the following conditions
23 ** are met:
24 ** 1. Redistributions of source code must retain the above copyright
25 ** notice, this list of conditions and the following disclaimer.
26 ** 2. Redistributions in binary form must reproduce the above copyright
27 ** notice, this list of conditions and the following disclaimer in the
28 ** documentation and/or other materials provided with the distribution.
29 ** 3. The name of the author may not be used to endorse or promote products
30 ** derived from this software without specific prior written permission.
31 **
32 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
33 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
34 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
35 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
36 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
37 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
39 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40 ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
41 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 *******************************************************************************
43 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
44 ** Firmware Specification, see Documentation/scsi/arcmsr_spec.rst
45 *******************************************************************************
46 */
47 #include <linux/module.h>
48 #include <linux/reboot.h>
49 #include <linux/spinlock.h>
50 #include <linux/pci_ids.h>
51 #include <linux/interrupt.h>
52 #include <linux/moduleparam.h>
53 #include <linux/errno.h>
54 #include <linux/types.h>
55 #include <linux/delay.h>
56 #include <linux/dma-mapping.h>
57 #include <linux/timer.h>
58 #include <linux/slab.h>
59 #include <linux/pci.h>
60 #include <linux/circ_buf.h>
61 #include <asm/dma.h>
62 #include <asm/io.h>
63 #include <linux/uaccess.h>
64 #include <scsi/scsi_host.h>
65 #include <scsi/scsi.h>
66 #include <scsi/scsi_cmnd.h>
67 #include <scsi/scsi_tcq.h>
68 #include <scsi/scsi_device.h>
69 #include <scsi/scsi_transport.h>
70 #include <scsi/scsicam.h>
71 #include "arcmsr.h"
72 MODULE_AUTHOR("Nick Cheng, C.L. Huang <support@areca.com.tw>");
73 MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver");
74 MODULE_LICENSE("Dual BSD/GPL");
75 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
76
77 static int msix_enable = 1;
78 module_param(msix_enable, int, S_IRUGO);
79 MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)");
80
81 static int msi_enable = 1;
82 module_param(msi_enable, int, S_IRUGO);
83 MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)");
84
85 static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
86 module_param(host_can_queue, int, S_IRUGO);
87 MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128");
88
89 static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
90 module_param(cmd_per_lun, int, S_IRUGO);
91 MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32");
92
93 static int dma_mask_64 = 0;
94 module_param(dma_mask_64, int, S_IRUGO);
95 MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)");
96
97 static int set_date_time = 0;
98 module_param(set_date_time, int, S_IRUGO);
99 MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");
100
101 static int cmd_timeout = ARCMSR_DEFAULT_TIMEOUT;
102 module_param(cmd_timeout, int, S_IRUGO);
103 MODULE_PARM_DESC(cmd_timeout, " scsi cmd timeout(0 ~ 120 sec.), default is 90");
104
105 #define ARCMSR_SLEEPTIME 10
106 #define ARCMSR_RETRYCOUNT 12
107
108 static wait_queue_head_t wait_q;
109 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
110 struct scsi_cmnd *cmd);
111 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
112 static int arcmsr_abort(struct scsi_cmnd *);
113 static int arcmsr_bus_reset(struct scsi_cmnd *);
114 static int arcmsr_bios_param(struct scsi_device *sdev,
115 struct gendisk *disk, sector_t capacity, int *info);
116 static enum scsi_qc_status arcmsr_queue_command(struct Scsi_Host *h,
117 struct scsi_cmnd *cmd);
118 static int arcmsr_probe(struct pci_dev *pdev,
119 const struct pci_device_id *id);
120 static int __maybe_unused arcmsr_suspend(struct device *dev);
121 static int __maybe_unused arcmsr_resume(struct device *dev);
122 static void arcmsr_remove(struct pci_dev *pdev);
123 static void arcmsr_shutdown(struct pci_dev *pdev);
124 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
125 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
126 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
127 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
128 u32 intmask_org);
129 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
130 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
131 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
132 static void arcmsr_request_device_map(struct timer_list *t);
133 static void arcmsr_message_isr_bh_fn(struct work_struct *work);
134 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
135 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
136 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
137 static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
138 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
139 static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
140 static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb);
141 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
142 static const char *arcmsr_info(struct Scsi_Host *);
143 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
144 static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
145 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
146 static void arcmsr_set_iop_datetime(struct timer_list *);
147 static int arcmsr_sdev_configure(struct scsi_device *sdev,
148 struct queue_limits *lim);
arcmsr_adjust_disk_queue_depth(struct scsi_device * sdev,int queue_depth)149 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
150 {
151 if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
152 queue_depth = ARCMSR_MAX_CMD_PERLUN;
153 return scsi_change_queue_depth(sdev, queue_depth);
154 }
155
/*
 * SCSI mid-layer host template for all supported adapter types.
 * The max_sectors and sg_tablesize defaults here are overridden
 * per-adapter once the firmware configuration has been read (see
 * arcmsr_alloc_ccb_pool()).
 */
static const struct scsi_host_template arcmsr_scsi_host_template = {
	.module			= THIS_MODULE,
	.proc_name		= ARCMSR_NAME,
	.name			= "Areca SAS/SATA RAID driver",
	.info			= arcmsr_info,
	.queuecommand		= arcmsr_queue_command,
	.eh_abort_handler	= arcmsr_abort,
	.eh_bus_reset_handler	= arcmsr_bus_reset,
	.bios_param		= arcmsr_bios_param,
	.sdev_configure		= arcmsr_sdev_configure,
	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
	.can_queue		= ARCMSR_DEFAULT_OUTSTANDING_CMD,
	.this_id		= ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize		= ARCMSR_DEFAULT_SG_ENTRIES,
	.max_sectors		= ARCMSR_MAX_XFER_SECTORS_C,
	.cmd_per_lun		= ARCMSR_DEFAULT_CMD_PERLUN,
	.shost_groups		= arcmsr_host_groups,
	.no_write_same		= 1,
};
175
/*
 * PCI ids handled by this driver.  driver_data selects the adapter
 * (message unit) type, ACB_ADAPTER_TYPE_A..F, which determines the
 * register layout used by every per-type code path in the driver.
 */
static const struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1203),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
		.driver_data = ACB_ADAPTER_TYPE_D},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
		.driver_data = ACB_ADAPTER_TYPE_C},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1883),
		.driver_data = ACB_ADAPTER_TYPE_C},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
		.driver_data = ACB_ADAPTER_TYPE_E},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886_0),
		.driver_data = ACB_ADAPTER_TYPE_F},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886),
		.driver_data = ACB_ADAPTER_TYPE_F},
	{0, 0},	/* Terminating entry */
};
229 MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
230
231 static SIMPLE_DEV_PM_OPS(arcmsr_pm_ops, arcmsr_suspend, arcmsr_resume);
232
/* PCI driver glue: binds the Areca id table to the arcmsr entry points. */
static struct pci_driver arcmsr_pci_driver = {
	.name			= "arcmsr",
	.id_table		= arcmsr_device_id_table,
	.probe			= arcmsr_probe,
	.remove			= arcmsr_remove,
	.driver.pm		= &arcmsr_pm_ops,
	.shutdown		= arcmsr_shutdown,
};
241 /*
242 ****************************************************************************
243 ****************************************************************************
244 */
245
/*
 * Release the DMA-coherent region (dma_coherent2) that backs the message
 * unit / completion queue.  Only adapter types B, D, E and F allocate
 * such a region (see arcmsr_alloc_io_queue()); the remaining types have
 * nothing to free, hence the switch deliberately has no default case.
 */
static void arcmsr_free_io_queue(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
	case ACB_ADAPTER_TYPE_D:
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size,
			acb->dma_coherent2, acb->dma_coherent_handle2);
		break;
	}
}
258
/*
 * Map the adapter's PCI BAR(s) into kernel virtual address space and do
 * the minimal per-type post-map initialization (clearing a pending
 * message interrupt, synchronizing the doorbell shadow state).
 *
 * BAR usage by adapter type, as coded below:
 *   A: BAR0 -> pmuA
 *   B: BAR0 -> mem_base0, BAR2 -> mem_base1
 *   C: BAR1 -> pmuC
 *   D: BAR0 -> mem_base0
 *   E: BAR1 -> pmuE
 *   F: BAR0 -> pmuF
 *
 * Returns true on success, false if any ioremap() fails (type B unmaps
 * its first BAR before reporting failure).
 */
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	switch (acb->adapter_type){
	case ACB_ADAPTER_TYPE_A:{
		acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
		if (!acb->pmuA) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B:{
		/* Type B needs both BAR0 (doorbells) and BAR2 (message buffers). */
		void __iomem *mem_base0, *mem_base1;
		mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!mem_base0) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
		if (!mem_base1) {
			iounmap(mem_base0);
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		acb->mem_base1 = mem_base1;
		break;
	}
	case ACB_ADAPTER_TYPE_C:{
		acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
		if (!acb->pmuC) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		/* Ack a message-done doorbell left over from before the remap. */
		if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
			return true;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		void __iomem *mem_base0;
		unsigned long addr, range;

		addr = (unsigned long)pci_resource_start(pdev, 0);
		range = pci_resource_len(pdev, 0);
		mem_base0 = ioremap(addr, range);
		if (!mem_base0) {
			pr_notice("arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		acb->pmuE = ioremap(pci_resource_start(pdev, 1),
			pci_resource_len(pdev, 1));
		if (!acb->pmuE) {
			pr_notice("arcmsr%d: memory mapping region fail \n",
				acb->host->host_no);
			return false;
		}
		writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
		writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);	/* synchronize doorbell to 0 */
		/* type E/F track the toggling doorbell bits in shadow copies */
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		break;
	}
	case ACB_ADAPTER_TYPE_F: {
		acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!acb->pmuF) {
			pr_notice("arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		writel(0, &acb->pmuF->host_int_status); /* clear interrupt */
		writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		break;
	}
	}
	return true;
}
345
arcmsr_unmap_pciregion(struct AdapterControlBlock * acb)346 static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
347 {
348 switch (acb->adapter_type) {
349 case ACB_ADAPTER_TYPE_A:
350 iounmap(acb->pmuA);
351 break;
352 case ACB_ADAPTER_TYPE_B:
353 iounmap(acb->mem_base0);
354 iounmap(acb->mem_base1);
355 break;
356 case ACB_ADAPTER_TYPE_C:
357 iounmap(acb->pmuC);
358 break;
359 case ACB_ADAPTER_TYPE_D:
360 iounmap(acb->mem_base0);
361 break;
362 case ACB_ADAPTER_TYPE_E:
363 iounmap(acb->pmuE);
364 break;
365 case ACB_ADAPTER_TYPE_F:
366 iounmap(acb->pmuF);
367 break;
368 }
369 }
370
arcmsr_do_interrupt(int irq,void * dev_id)371 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
372 {
373 irqreturn_t handle_state;
374 struct AdapterControlBlock *acb = dev_id;
375
376 handle_state = arcmsr_interrupt(acb);
377 return handle_state;
378 }
379
/*
 * Report a legacy CHS geometry for the BIOS.  If the disk already has a
 * partition table encoding a geometry, scsi_partsize() fills @geom and
 * we return early; otherwise fabricate 64 heads / 32 sectors, switching
 * to 255/63 when that would exceed 1024 cylinders.
 *
 * NOTE(review): total_capacity is an int, so a capacity of 2^31 sectors
 * (~1 TiB at 512 bytes/sector) or more overflows here and produces a
 * bogus cylinder count -- confirm whether sector_t arithmetic (with
 * sector_div) is needed for large volumes.
 */
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct gendisk *disk, sector_t capacity, int *geom)
{
	int heads, sectors, cylinders, total_capacity;

	if (scsi_partsize(disk, capacity, geom))
		return 0;

	total_capacity = capacity;
	heads = 64;
	sectors = 32;
	cylinders = total_capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = total_capacity / (heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return 0;
}
402
arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock * acb)403 static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
404 {
405 struct MessageUnit_A __iomem *reg = acb->pmuA;
406 int i;
407
408 for (i = 0; i < 2000; i++) {
409 if (readl(®->outbound_intstatus) &
410 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
411 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
412 ®->outbound_intstatus);
413 return true;
414 }
415 msleep(10);
416 } /* max 20 seconds */
417
418 return false;
419 }
420
arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock * acb)421 static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
422 {
423 struct MessageUnit_B *reg = acb->pmuB;
424 int i;
425
426 for (i = 0; i < 2000; i++) {
427 if (readl(reg->iop2drv_doorbell)
428 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
429 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
430 reg->iop2drv_doorbell);
431 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
432 reg->drv2iop_doorbell);
433 return true;
434 }
435 msleep(10);
436 } /* max 20 seconds */
437
438 return false;
439 }
440
arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock * pACB)441 static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
442 {
443 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
444 int i;
445
446 for (i = 0; i < 2000; i++) {
447 if (readl(&phbcmu->outbound_doorbell)
448 & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
449 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
450 &phbcmu->outbound_doorbell_clear); /*clear interrupt*/
451 return true;
452 }
453 msleep(10);
454 } /* max 20 seconds */
455
456 return false;
457 }
458
arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock * pACB)459 static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
460 {
461 struct MessageUnit_D *reg = pACB->pmuD;
462 int i;
463
464 for (i = 0; i < 2000; i++) {
465 if (readl(reg->outbound_doorbell)
466 & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
467 writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
468 reg->outbound_doorbell);
469 return true;
470 }
471 msleep(10);
472 } /* max 20 seconds */
473 return false;
474 }
475
arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock * pACB)476 static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB)
477 {
478 int i;
479 uint32_t read_doorbell;
480 struct MessageUnit_E __iomem *phbcmu = pACB->pmuE;
481
482 for (i = 0; i < 2000; i++) {
483 read_doorbell = readl(&phbcmu->iobound_doorbell);
484 if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
485 writel(0, &phbcmu->host_int_status); /*clear interrupt*/
486 pACB->in_doorbell = read_doorbell;
487 return true;
488 }
489 msleep(10);
490 } /* max 20 seconds */
491 return false;
492 }
493
arcmsr_hbaA_flush_cache(struct AdapterControlBlock * acb)494 static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
495 {
496 struct MessageUnit_A __iomem *reg = acb->pmuA;
497 int retry_count = 30;
498 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0);
499 do {
500 if (arcmsr_hbaA_wait_msgint_ready(acb))
501 break;
502 else {
503 retry_count--;
504 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
505 timeout, retry count down = %d \n", acb->host->host_no, retry_count);
506 }
507 } while (retry_count != 0);
508 }
509
arcmsr_hbaB_flush_cache(struct AdapterControlBlock * acb)510 static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
511 {
512 struct MessageUnit_B *reg = acb->pmuB;
513 int retry_count = 30;
514 writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
515 do {
516 if (arcmsr_hbaB_wait_msgint_ready(acb))
517 break;
518 else {
519 retry_count--;
520 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
521 timeout,retry count down = %d \n", acb->host->host_no, retry_count);
522 }
523 } while (retry_count != 0);
524 }
525
arcmsr_hbaC_flush_cache(struct AdapterControlBlock * pACB)526 static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
527 {
528 struct MessageUnit_C __iomem *reg = pACB->pmuC;
529 int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
530 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0);
531 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
532 do {
533 if (arcmsr_hbaC_wait_msgint_ready(pACB)) {
534 break;
535 } else {
536 retry_count--;
537 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
538 timeout,retry count down = %d \n", pACB->host->host_no, retry_count);
539 }
540 } while (retry_count != 0);
541 return;
542 }
543
arcmsr_hbaD_flush_cache(struct AdapterControlBlock * pACB)544 static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
545 {
546 int retry_count = 15;
547 struct MessageUnit_D *reg = pACB->pmuD;
548
549 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
550 do {
551 if (arcmsr_hbaD_wait_msgint_ready(pACB))
552 break;
553
554 retry_count--;
555 pr_notice("arcmsr%d: wait 'flush adapter "
556 "cache' timeout, retry count down = %d\n",
557 pACB->host->host_no, retry_count);
558 } while (retry_count != 0);
559 }
560
arcmsr_hbaE_flush_cache(struct AdapterControlBlock * pACB)561 static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB)
562 {
563 int retry_count = 30;
564 struct MessageUnit_E __iomem *reg = pACB->pmuE;
565
566 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, ®->inbound_msgaddr0);
567 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
568 writel(pACB->out_doorbell, ®->iobound_doorbell);
569 do {
570 if (arcmsr_hbaE_wait_msgint_ready(pACB))
571 break;
572 retry_count--;
573 pr_notice("arcmsr%d: wait 'flush adapter "
574 "cache' timeout, retry count down = %d\n",
575 pACB->host->host_no, retry_count);
576 } while (retry_count != 0);
577 }
578
arcmsr_flush_adapter_cache(struct AdapterControlBlock * acb)579 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
580 {
581 switch (acb->adapter_type) {
582
583 case ACB_ADAPTER_TYPE_A:
584 arcmsr_hbaA_flush_cache(acb);
585 break;
586 case ACB_ADAPTER_TYPE_B:
587 arcmsr_hbaB_flush_cache(acb);
588 break;
589 case ACB_ADAPTER_TYPE_C:
590 arcmsr_hbaC_flush_cache(acb);
591 break;
592 case ACB_ADAPTER_TYPE_D:
593 arcmsr_hbaD_flush_cache(acb);
594 break;
595 case ACB_ADAPTER_TYPE_E:
596 case ACB_ADAPTER_TYPE_F:
597 arcmsr_hbaE_flush_cache(acb);
598 break;
599 }
600 }
601
/*
 * Fill in the MessageUnit_B register pointers.  The ARC-1203 uses a
 * different set of BAR0 doorbell offsets than the other type-B boards;
 * the message buffers live in BAR2 (MEM_BASE1) on all of them.
 */
static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
		reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203);
		reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203);
		reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203);
		reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203);
	} else {
		reg->drv2iop_doorbell= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL);
		reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK);
		reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL);
		reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK);
	}
	reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER);
	reg->message_rbuffer =  MEM_BASE1(ARCMSR_MESSAGE_RBUFFER);
	reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER);
}
621
/*
 * Populate the MessageUnit_D register pointer table from fixed offsets
 * within BAR0 (MEM_BASE0); the offsets follow the ARC-1214 hardware
 * layout.
 */
static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg = acb->pmuD;

	reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID);
	reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION);
	reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK);
	reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET);
	reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST);
	reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS);
	reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE);
	reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0);
	reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1);
	reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0);
	reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1);
	reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL);
	reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL);
	reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE);
	reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW);
	reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH);
	reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER);
	reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW);
	reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH);
	reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER);
	reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER);
	reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE);
	reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE);
	reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER);
	reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER);
	reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
}
653
/*
 * Carve the type-F host/IOP message buffers out of the tail of the
 * coherent completion-queue allocation (dma_coherent2) and program the
 * IOP with their bus address.
 *
 * Layout inside the allocation: the write buffer starts right after the
 * completion queue (4-byte aligned), the read buffer at +0x100 and the
 * msgcode rw buffer at +0x200 from the write buffer.
 */
static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock *acb)
{
	dma_addr_t host_buffer_dma;
	struct MessageUnit_F __iomem *pmuF;

	memset(acb->dma_coherent2, 0xff, acb->completeQ_size);
	acb->message_wbuffer = (uint32_t *)round_up((unsigned long)acb->dma_coherent2 +
		acb->completeQ_size, 4);
	acb->message_rbuffer = ((void *)acb->message_wbuffer) + 0x100;
	acb->msgcode_rwbuffer = ((void *)acb->message_wbuffer) + 0x200;
	memset((void *)acb->message_wbuffer, 0, MESG_RW_BUFFER_SIZE);
	host_buffer_dma = round_up(acb->dma_coherent_handle2 + acb->completeQ_size, 4);
	pmuF = acb->pmuF;
	/* host buffer low address, bit0:1 all buffer active */
	writel(lower_32_bits(host_buffer_dma | 1), &pmuF->inbound_msgaddr0);
	/* host buffer high address */
	writel(upper_32_bits(host_buffer_dma), &pmuF->inbound_msgaddr1);
	/* set host buffer physical address */
	writel(ARCMSR_HBFMU_DOORBELL_SYNC1, &pmuF->iobound_doorbell);
}
674
arcmsr_alloc_io_queue(struct AdapterControlBlock * acb)675 static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
676 {
677 bool rtn = true;
678 void *dma_coherent;
679 dma_addr_t dma_coherent_handle;
680 struct pci_dev *pdev = acb->pdev;
681
682 switch (acb->adapter_type) {
683 case ACB_ADAPTER_TYPE_B: {
684 acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32);
685 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
686 &dma_coherent_handle, GFP_KERNEL);
687 if (!dma_coherent) {
688 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
689 return false;
690 }
691 acb->dma_coherent_handle2 = dma_coherent_handle;
692 acb->dma_coherent2 = dma_coherent;
693 acb->pmuB = (struct MessageUnit_B *)dma_coherent;
694 arcmsr_hbaB_assign_regAddr(acb);
695 }
696 break;
697 case ACB_ADAPTER_TYPE_D: {
698 acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32);
699 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
700 &dma_coherent_handle, GFP_KERNEL);
701 if (!dma_coherent) {
702 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
703 return false;
704 }
705 acb->dma_coherent_handle2 = dma_coherent_handle;
706 acb->dma_coherent2 = dma_coherent;
707 acb->pmuD = (struct MessageUnit_D *)dma_coherent;
708 arcmsr_hbaD_assign_regAddr(acb);
709 }
710 break;
711 case ACB_ADAPTER_TYPE_E: {
712 uint32_t completeQ_size;
713 completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
714 acb->ioqueue_size = roundup(completeQ_size, 32);
715 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
716 &dma_coherent_handle, GFP_KERNEL);
717 if (!dma_coherent){
718 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
719 return false;
720 }
721 acb->dma_coherent_handle2 = dma_coherent_handle;
722 acb->dma_coherent2 = dma_coherent;
723 acb->pCompletionQ = dma_coherent;
724 acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
725 acb->doneq_index = 0;
726 }
727 break;
728 case ACB_ADAPTER_TYPE_F: {
729 uint32_t QueueDepth;
730 uint32_t depthTbl[] = {256, 512, 1024, 128, 64, 32};
731
732 arcmsr_wait_firmware_ready(acb);
733 QueueDepth = depthTbl[readl(&acb->pmuF->outbound_msgaddr1) & 7];
734 acb->completeQ_size = sizeof(struct deliver_completeQ) * QueueDepth + 128;
735 acb->ioqueue_size = roundup(acb->completeQ_size + MESG_RW_BUFFER_SIZE, 32);
736 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
737 &dma_coherent_handle, GFP_KERNEL);
738 if (!dma_coherent) {
739 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
740 return false;
741 }
742 acb->dma_coherent_handle2 = dma_coherent_handle;
743 acb->dma_coherent2 = dma_coherent;
744 acb->pCompletionQ = dma_coherent;
745 acb->completionQ_entry = acb->completeQ_size / sizeof(struct deliver_completeQ);
746 acb->doneq_index = 0;
747 arcmsr_hbaF_assign_regAddr(acb);
748 }
749 break;
750 default:
751 break;
752 }
753 return rtn;
754 }
755
arcmsr_alloc_xor_buffer(struct AdapterControlBlock * acb)756 static int arcmsr_alloc_xor_buffer(struct AdapterControlBlock *acb)
757 {
758 int rc = 0;
759 struct pci_dev *pdev = acb->pdev;
760 void *dma_coherent;
761 dma_addr_t dma_coherent_handle;
762 int i, xor_ram;
763 struct Xor_sg *pXorPhys;
764 void **pXorVirt;
765 struct HostRamBuf *pRamBuf;
766
767 // allocate 1 MB * N physically continuous memory for XOR engine.
768 xor_ram = (acb->firm_PicStatus >> 24) & 0x0f;
769 acb->xor_mega = (xor_ram - 1) * 32 + 128 + 3;
770 acb->init2cfg_size = sizeof(struct HostRamBuf) +
771 (sizeof(struct XorHandle) * acb->xor_mega);
772 dma_coherent = dma_alloc_coherent(&pdev->dev, acb->init2cfg_size,
773 &dma_coherent_handle, GFP_KERNEL);
774 acb->xorVirt = dma_coherent;
775 acb->xorPhys = dma_coherent_handle;
776 pXorPhys = (struct Xor_sg *)((unsigned long)dma_coherent +
777 sizeof(struct HostRamBuf));
778 acb->xorVirtOffset = sizeof(struct HostRamBuf) +
779 (sizeof(struct Xor_sg) * acb->xor_mega);
780 pXorVirt = (void **)((unsigned long)dma_coherent +
781 (unsigned long)acb->xorVirtOffset);
782 for (i = 0; i < acb->xor_mega; i++) {
783 dma_coherent = dma_alloc_coherent(&pdev->dev,
784 ARCMSR_XOR_SEG_SIZE,
785 &dma_coherent_handle, GFP_KERNEL);
786 if (dma_coherent) {
787 pXorPhys->xorPhys = dma_coherent_handle;
788 pXorPhys->xorBufLen = ARCMSR_XOR_SEG_SIZE;
789 *pXorVirt = dma_coherent;
790 pXorPhys++;
791 pXorVirt++;
792 } else {
793 pr_info("arcmsr%d: alloc max XOR buffer = 0x%x MB\n",
794 acb->host->host_no, i);
795 rc = -ENOMEM;
796 break;
797 }
798 }
799 pRamBuf = (struct HostRamBuf *)acb->xorVirt;
800 pRamBuf->hrbSignature = 0x53425248; //HRBS
801 pRamBuf->hrbSize = i * ARCMSR_XOR_SEG_SIZE;
802 pRamBuf->hrbRes[0] = 0;
803 pRamBuf->hrbRes[1] = 0;
804 return rc;
805 }
806
/*
 * arcmsr_alloc_ccb_pool - allocate the DMA-coherent command control block pool.
 *
 * Sizes the per-command CCB (header plus max_sg_entrys SG entries, rounded to
 * 32 bytes), allocates one coherent region for the whole pool (plus the
 * message-unit I/O queue area for all adapter types except F, which allocates
 * it separately), and threads each CCB onto acb->ccb_free_list.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	struct CommandControlBlock *ccb_tmp;
	int i = 0, j = 0;
	unsigned long cdb_phyaddr, next_ccb_phy;
	unsigned long roundup_ccbsize;
	unsigned long max_xfer_len;
	unsigned long max_sg_entrys;
	uint32_t firm_config_version, curr_phy_upper32;

	/* Until the firmware reports otherwise, every device is "gone". */
	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GONE;

	max_xfer_len = ARCMSR_MAX_XFER_LEN;
	max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
	firm_config_version = acb->firm_cfg_version;
	/* Firmware config rev >= 3 advertises a larger transfer size. */
	if((firm_config_version & 0xFF) >= 3){
		max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
		max_sg_entrys = (max_xfer_len/4096);
	}
	acb->host->max_sectors = max_xfer_len/512;
	acb->host->sg_tablesize = max_sg_entrys;
	/* CCB already contains one SG entry, hence (max_sg_entrys - 1). */
	roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
	acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
	/* Type F allocated its I/O queue separately in arcmsr_alloc_io_queue(). */
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
		acb->uncache_size += acb->ioqueue_size;
	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
	if(!dma_coherent){
		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
		return -ENOMEM;
	}
	acb->dma_coherent = dma_coherent;
	acb->dma_coherent_handle = dma_coherent_handle;
	memset(dma_coherent, 0, acb->uncache_size);
	acb->ccbsize = roundup_ccbsize;
	ccb_tmp = dma_coherent;
	curr_phy_upper32 = upper_32_bits(dma_coherent_handle);
	/* Offset for translating a CCB bus address back to its virtual address. */
	acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
	for(i = 0; i < acb->maxFreeCCB; i++){
		cdb_phyaddr = (unsigned long)dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A:
		case ACB_ADAPTER_TYPE_B:
			/* Older units post the CDB address pre-shifted by 5. */
			ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
			break;
		case ACB_ADAPTER_TYPE_C:
		case ACB_ADAPTER_TYPE_D:
		case ACB_ADAPTER_TYPE_E:
		case ACB_ADAPTER_TYPE_F:
			ccb_tmp->cdb_phyaddr = cdb_phyaddr;
			break;
		}
		acb->pccb_pool[i] = ccb_tmp;
		ccb_tmp->acb = acb;
		ccb_tmp->smid = (u32)i << 16;
		INIT_LIST_HEAD(&ccb_tmp->list);
		next_ccb_phy = dma_coherent_handle + roundup_ccbsize;
		/*
		 * The hardware hands back only the low 32 bits of a CCB's bus
		 * address; stop at the first CCB that would cross a 4GB
		 * boundary and shrink the pool accordingly.
		 */
		if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) {
			acb->maxFreeCCB = i;
			acb->host->can_queue = i;
			break;
		}
		else
			list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
		ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
		dma_coherent_handle = next_ccb_phy;
	}
	/* The remainder of the region (past the CCBs) is the message unit area. */
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F) {
		acb->dma_coherent_handle2 = dma_coherent_handle;
		acb->dma_coherent2 = ccb_tmp;
	}
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
		acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2;
		arcmsr_hbaB_assign_regAddr(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2;
		arcmsr_hbaD_assign_regAddr(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		acb->pCompletionQ = acb->dma_coherent2;
		acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
		acb->doneq_index = 0;
		break;
	}
	/* Nonzero PicStatus[27:24] means the firmware wants host XOR buffers. */
	if ((acb->firm_PicStatus >> 24) & 0x0f) {
		if (arcmsr_alloc_xor_buffer(acb))
			return -ENOMEM;
	}
	return 0;
}
903
arcmsr_message_isr_bh_fn(struct work_struct * work)904 static void arcmsr_message_isr_bh_fn(struct work_struct *work)
905 {
906 struct AdapterControlBlock *acb = container_of(work,
907 struct AdapterControlBlock, arcmsr_do_message_isr_bh);
908 char *acb_dev_map = (char *)acb->device_map;
909 uint32_t __iomem *signature = NULL;
910 char __iomem *devicemap = NULL;
911 int target, lun;
912 struct scsi_device *psdev;
913 char diff, temp;
914
915 switch (acb->adapter_type) {
916 case ACB_ADAPTER_TYPE_A: {
917 struct MessageUnit_A __iomem *reg = acb->pmuA;
918
919 signature = (uint32_t __iomem *)(®->message_rwbuffer[0]);
920 devicemap = (char __iomem *)(®->message_rwbuffer[21]);
921 break;
922 }
923 case ACB_ADAPTER_TYPE_B: {
924 struct MessageUnit_B *reg = acb->pmuB;
925
926 signature = (uint32_t __iomem *)(®->message_rwbuffer[0]);
927 devicemap = (char __iomem *)(®->message_rwbuffer[21]);
928 break;
929 }
930 case ACB_ADAPTER_TYPE_C: {
931 struct MessageUnit_C __iomem *reg = acb->pmuC;
932
933 signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]);
934 devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]);
935 break;
936 }
937 case ACB_ADAPTER_TYPE_D: {
938 struct MessageUnit_D *reg = acb->pmuD;
939
940 signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]);
941 devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]);
942 break;
943 }
944 case ACB_ADAPTER_TYPE_E: {
945 struct MessageUnit_E __iomem *reg = acb->pmuE;
946
947 signature = (uint32_t __iomem *)(®->msgcode_rwbuffer[0]);
948 devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]);
949 break;
950 }
951 case ACB_ADAPTER_TYPE_F: {
952 signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]);
953 devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]);
954 break;
955 }
956 }
957 if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
958 return;
959 for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
960 target++) {
961 temp = readb(devicemap);
962 diff = (*acb_dev_map) ^ temp;
963 if (diff != 0) {
964 *acb_dev_map = temp;
965 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
966 lun++) {
967 if ((diff & 0x01) == 1 &&
968 (temp & 0x01) == 1) {
969 scsi_add_device(acb->host,
970 0, target, lun);
971 } else if ((diff & 0x01) == 1
972 && (temp & 0x01) == 0) {
973 psdev = scsi_device_lookup(acb->host,
974 0, target, lun);
975 if (psdev != NULL) {
976 scsi_remove_device(psdev);
977 scsi_device_put(psdev);
978 }
979 }
980 temp >>= 1;
981 diff >>= 1;
982 }
983 }
984 devicemap++;
985 acb_dev_map++;
986 }
987 acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
988 }
989
990 static int
arcmsr_request_irq(struct pci_dev * pdev,struct AdapterControlBlock * acb)991 arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
992 {
993 unsigned long flags;
994 int nvec, i;
995
996 if (msix_enable == 0)
997 goto msi_int0;
998 nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
999 PCI_IRQ_MSIX);
1000 if (nvec > 0) {
1001 pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
1002 flags = 0;
1003 } else {
1004 msi_int0:
1005 if (msi_enable == 1) {
1006 nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
1007 if (nvec == 1) {
1008 dev_info(&pdev->dev, "msi enabled\n");
1009 goto msi_int1;
1010 }
1011 }
1012 nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
1013 if (nvec < 1)
1014 return FAILED;
1015 msi_int1:
1016 flags = IRQF_SHARED;
1017 }
1018
1019 acb->vector_count = nvec;
1020 for (i = 0; i < nvec; i++) {
1021 if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
1022 flags, "arcmsr", acb)) {
1023 pr_warn("arcmsr%d: request_irq =%d failed!\n",
1024 acb->host->host_no, pci_irq_vector(pdev, i));
1025 goto out_free_irq;
1026 }
1027 }
1028
1029 return SUCCESS;
1030 out_free_irq:
1031 while (--i >= 0)
1032 free_irq(pci_irq_vector(pdev, i), acb);
1033 pci_free_irq_vectors(pdev);
1034 return FAILED;
1035 }
1036
arcmsr_init_get_devmap_timer(struct AdapterControlBlock * pacb)1037 static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
1038 {
1039 INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
1040 pacb->fw_flag = FW_NORMAL;
1041 timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
1042 pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
1043 add_timer(&pacb->eternal_timer);
1044 }
1045
arcmsr_init_set_datetime_timer(struct AdapterControlBlock * pacb)1046 static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
1047 {
1048 timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
1049 pacb->refresh_timer.expires = jiffies + secs_to_jiffies(60);
1050 add_timer(&pacb->refresh_timer);
1051 }
1052
arcmsr_set_dma_mask(struct AdapterControlBlock * acb)1053 static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb)
1054 {
1055 struct pci_dev *pcidev = acb->pdev;
1056
1057 if (IS_DMA64) {
1058 if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) ||
1059 dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64)))
1060 goto dma32;
1061 if (acb->adapter_type <= ACB_ADAPTER_TYPE_B)
1062 return 0;
1063 if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) ||
1064 dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) {
1065 printk("arcmsr: set DMA 64 mask failed\n");
1066 return -ENXIO;
1067 }
1068 } else {
1069 dma32:
1070 if (dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
1071 dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(32)) ||
1072 dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) {
1073 printk("arcmsr: set DMA 32-bit mask failed\n");
1074 return -ENXIO;
1075 }
1076 }
1077 return 0;
1078 }
1079
/*
 * arcmsr_probe - PCI probe entry point for Areca RAID controllers.
 *
 * Enables the PCI device, allocates the Scsi_Host and adapter control block,
 * sets DMA masks, maps BARs, allocates I/O queues and the CCB pool, registers
 * the host, wires up interrupts, starts the device-map (and optionally
 * date/time) timers, and kicks off the SCSI scan.  Errors unwind through the
 * labelled teardown ladder at the bottom.
 *
 * NOTE(review): arcmsr_remap_pciregion(), arcmsr_alloc_io_queue() and
 * arcmsr_get_firmware_spec() appear to return nonzero on SUCCESS (the
 * "if (!error) goto ..." tests below), the opposite of the usual kernel
 * convention — confirm against their definitions before changing.
 *
 * Returns 0 on success, -ENODEV on any failure.
 */
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct AdapterControlBlock *acb;
	uint8_t bus,dev_fun;
	int error;
	error = pci_enable_device(pdev);
	if(error){
		return -ENODEV;
	}
	/* hostdata holds the driver's per-adapter control block. */
	host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
	if(!host){
		goto pci_disable_dev;
	}
	init_waitqueue_head(&wait_q);
	bus = pdev->bus->number;
	dev_fun = pdev->devfn;
	acb = (struct AdapterControlBlock *) host->hostdata;
	memset(acb,0,sizeof(struct AdapterControlBlock));
	acb->pdev = pdev;
	/* Adapter family (A..F) is encoded in the PCI id table's driver_data. */
	acb->adapter_type = id->driver_data;
	if (arcmsr_set_dma_mask(acb))
		goto scsi_host_release;
	acb->host = host;
	host->max_lun = ARCMSR_MAX_TARGETLUN;
	host->max_id = ARCMSR_MAX_TARGETID;		/*16:8*/
	host->max_cmd_len = 16;	 /*this is issue of 64bit LBA ,over 2T byte*/
	/* Clamp module parameters to sane bounds before publishing them. */
	if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
		host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
	host->can_queue = host_can_queue;	/* max simultaneous cmds */
	if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
		cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
	host->cmd_per_lun = cmd_per_lun;
	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
	host->unique_id = (bus << 8) | dev_fun;
	pci_set_drvdata(pdev, host);
	pci_set_master(pdev);
	error = pci_request_regions(pdev, "arcmsr");
	if(error){
		goto scsi_host_release;
	}
	spin_lock_init(&acb->eh_lock);
	spin_lock_init(&acb->ccblist_lock);
	spin_lock_init(&acb->postq_lock);
	spin_lock_init(&acb->doneq_lock);
	spin_lock_init(&acb->rqbuffer_lock);
	spin_lock_init(&acb->wqbuffer_lock);
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_RQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	INIT_LIST_HEAD(&acb->ccb_free_list);
	error = arcmsr_remap_pciregion(acb);
	if(!error){
		goto pci_release_regs;
	}
	error = arcmsr_alloc_io_queue(acb);
	if (!error)
		goto unmap_pci_region;
	error = arcmsr_get_firmware_spec(acb);
	if(!error){
		goto free_hbb_mu;
	}
	/*
	 * Non-F adapters re-allocate the I/O queue inside
	 * arcmsr_alloc_ccb_pool(), so free the interim one here.
	 */
	if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	error = arcmsr_alloc_ccb_pool(acb);
	if(error){
		goto unmap_pci_region;
	}
	error = scsi_add_host(host, &pdev->dev);
	if(error){
		goto free_ccb_pool;
	}
	if (arcmsr_request_irq(pdev, acb) == FAILED)
		goto scsi_host_remove;
	arcmsr_iop_init(acb);
	arcmsr_init_get_devmap_timer(acb);
	if (set_date_time)
		arcmsr_init_set_datetime_timer(acb);
	if(arcmsr_alloc_sysfs_attr(acb))
		goto out_free_sysfs;
	scsi_scan_host(host);
	return 0;
out_free_sysfs:
	/* Sysfs failed after timers/IRQs were live: stop them all first. */
	if (set_date_time)
		timer_delete_sync(&acb->refresh_timer);
	timer_delete_sync(&acb->eternal_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_free_irq(pdev, acb);
scsi_host_remove:
	scsi_remove_host(host);
free_ccb_pool:
	arcmsr_free_ccb_pool(acb);
	goto unmap_pci_region;
free_hbb_mu:
	arcmsr_free_io_queue(acb);
unmap_pci_region:
	arcmsr_unmap_pciregion(acb);
pci_release_regs:
	pci_release_regions(pdev);
scsi_host_release:
	scsi_host_put(host);
pci_disable_dev:
	pci_disable_device(pdev);
	return -ENODEV;
}
1188
arcmsr_free_irq(struct pci_dev * pdev,struct AdapterControlBlock * acb)1189 static void arcmsr_free_irq(struct pci_dev *pdev,
1190 struct AdapterControlBlock *acb)
1191 {
1192 int i;
1193
1194 for (i = 0; i < acb->vector_count; i++)
1195 free_irq(pci_irq_vector(pdev, i), acb);
1196 pci_free_irq_vectors(pdev);
1197 }
1198
/*
 * arcmsr_suspend - PM suspend hook.
 *
 * Quiesces the adapter in a strict order: mask outbound interrupts, drop
 * the IRQ handlers, stop both driver timers, drain the device-map worker,
 * stop the background rebuild, and finally flush the controller cache so
 * no dirty data is lost across the power transition.
 */
static int __maybe_unused arcmsr_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	arcmsr_disable_outbound_ints(acb);
	arcmsr_free_irq(pdev, acb);
	timer_delete_sync(&acb->eternal_timer);
	/* refresh_timer only exists when set_date_time was requested. */
	if (set_date_time)
		timer_delete_sync(&acb->refresh_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	return 0;
}
1216
/*
 * arcmsr_resume - PM resume hook.
 *
 * Re-establishes DMA masks and interrupts, resets the family-specific
 * queue/doorbell bookkeeping that lives in host memory or got cleared
 * during suspend, re-initializes the IOP, and restarts the driver timers.
 * On failure it tears the host down completely and returns -ENODEV.
 */
static int __maybe_unused arcmsr_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	if (arcmsr_set_dma_mask(acb))
		goto controller_unregister;
	if (arcmsr_request_irq(pdev, acb) == FAILED)
		goto controller_stop;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B: {
		/* Type B queues live in host RAM: clear both rings. */
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t i;
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			reg->post_qbuffer[i] = 0;
			reg->done_qbuffer[i] = 0;
		}
		reg->postq_index = 0;
		reg->doneq_index = 0;
		break;
	}
	case ACB_ADAPTER_TYPE_E:
		/* Resynchronize the doorbell handshake with the IOP. */
		writel(0, &acb->pmuE->host_int_status);
		writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		acb->doneq_index = 0;
		break;
	case ACB_ADAPTER_TYPE_F:
		writel(0, &acb->pmuF->host_int_status);
		writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		acb->doneq_index = 0;
		arcmsr_hbaF_assign_regAddr(acb);
		break;
	}
	arcmsr_iop_init(acb);
	arcmsr_init_get_devmap_timer(acb);
	if (set_date_time)
		arcmsr_init_set_datetime_timer(acb);
	return 0;
controller_stop:
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
controller_unregister:
	scsi_remove_host(host);
	arcmsr_free_ccb_pool(acb);
	/* Type F keeps a separately-allocated I/O queue. */
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	scsi_host_put(host);
	return -ENODEV;
}
1273
arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock * acb)1274 static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
1275 {
1276 struct MessageUnit_A __iomem *reg = acb->pmuA;
1277 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0);
1278 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
1279 printk(KERN_NOTICE
1280 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1281 , acb->host->host_no);
1282 return false;
1283 }
1284 return true;
1285 }
1286
arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock * acb)1287 static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
1288 {
1289 struct MessageUnit_B *reg = acb->pmuB;
1290
1291 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
1292 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
1293 printk(KERN_NOTICE
1294 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1295 , acb->host->host_no);
1296 return false;
1297 }
1298 return true;
1299 }
arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock * pACB)1300 static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
1301 {
1302 struct MessageUnit_C __iomem *reg = pACB->pmuC;
1303 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0);
1304 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
1305 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
1306 printk(KERN_NOTICE
1307 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1308 , pACB->host->host_no);
1309 return false;
1310 }
1311 return true;
1312 }
1313
arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock * pACB)1314 static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
1315 {
1316 struct MessageUnit_D *reg = pACB->pmuD;
1317
1318 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0);
1319 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
1320 pr_notice("arcmsr%d: wait 'abort all outstanding "
1321 "command' timeout\n", pACB->host->host_no);
1322 return false;
1323 }
1324 return true;
1325 }
1326
arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock * pACB)1327 static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB)
1328 {
1329 struct MessageUnit_E __iomem *reg = pACB->pmuE;
1330
1331 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0);
1332 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
1333 writel(pACB->out_doorbell, ®->iobound_doorbell);
1334 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
1335 pr_notice("arcmsr%d: wait 'abort all outstanding "
1336 "command' timeout\n", pACB->host->host_no);
1337 return false;
1338 }
1339 return true;
1340 }
1341
arcmsr_abort_allcmd(struct AdapterControlBlock * acb)1342 static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
1343 {
1344 uint8_t rtnval = 0;
1345 switch (acb->adapter_type) {
1346 case ACB_ADAPTER_TYPE_A:
1347 rtnval = arcmsr_hbaA_abort_allcmd(acb);
1348 break;
1349 case ACB_ADAPTER_TYPE_B:
1350 rtnval = arcmsr_hbaB_abort_allcmd(acb);
1351 break;
1352 case ACB_ADAPTER_TYPE_C:
1353 rtnval = arcmsr_hbaC_abort_allcmd(acb);
1354 break;
1355 case ACB_ADAPTER_TYPE_D:
1356 rtnval = arcmsr_hbaD_abort_allcmd(acb);
1357 break;
1358 case ACB_ADAPTER_TYPE_E:
1359 case ACB_ADAPTER_TYPE_F:
1360 rtnval = arcmsr_hbaE_abort_allcmd(acb);
1361 break;
1362 }
1363 return rtnval;
1364 }
1365
/*
 * arcmsr_ccb_complete - finish a command control block.
 *
 * Drops the outstanding count, unmaps the command's DMA scatterlist,
 * marks the CCB done, returns it to the free list under ccblist_lock,
 * and completes the SCSI command back to the midlayer.
 */
static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
{
	struct AdapterControlBlock *acb = ccb->acb;
	struct scsi_cmnd *pcmd = ccb->pcmd;
	unsigned long flags;

	atomic_dec(&acb->ccboutstandingcount);
	scsi_dma_unmap(ccb->pcmd);
	ccb->startdone = ARCMSR_CCB_DONE;
	/* Recycle the CCB before signalling completion to the midlayer. */
	spin_lock_irqsave(&acb->ccblist_lock, flags);
	list_add_tail(&ccb->list, &acb->ccb_free_list);
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	scsi_done(pcmd);
}
1379
arcmsr_report_sense_info(struct CommandControlBlock * ccb)1380 static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
1381 {
1382 struct scsi_cmnd *pcmd = ccb->pcmd;
1383
1384 pcmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
1385 if (pcmd->sense_buffer) {
1386 struct SENSE_DATA *sensebuffer;
1387
1388 memcpy_and_pad(pcmd->sense_buffer,
1389 SCSI_SENSE_BUFFERSIZE,
1390 ccb->arcmsr_cdb.SenseData,
1391 sizeof(ccb->arcmsr_cdb.SenseData),
1392 0);
1393
1394 sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
1395 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
1396 sensebuffer->Valid = 1;
1397 }
1398 }
1399
arcmsr_disable_outbound_ints(struct AdapterControlBlock * acb)1400 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
1401 {
1402 u32 orig_mask = 0;
1403 switch (acb->adapter_type) {
1404 case ACB_ADAPTER_TYPE_A : {
1405 struct MessageUnit_A __iomem *reg = acb->pmuA;
1406 orig_mask = readl(®->outbound_intmask);
1407 writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
1408 ®->outbound_intmask);
1409 }
1410 break;
1411 case ACB_ADAPTER_TYPE_B : {
1412 struct MessageUnit_B *reg = acb->pmuB;
1413 orig_mask = readl(reg->iop2drv_doorbell_mask);
1414 writel(0, reg->iop2drv_doorbell_mask);
1415 }
1416 break;
1417 case ACB_ADAPTER_TYPE_C:{
1418 struct MessageUnit_C __iomem *reg = acb->pmuC;
1419 /* disable all outbound interrupt */
1420 orig_mask = readl(®->host_int_mask); /* disable outbound message0 int */
1421 writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask);
1422 }
1423 break;
1424 case ACB_ADAPTER_TYPE_D: {
1425 struct MessageUnit_D *reg = acb->pmuD;
1426 /* disable all outbound interrupt */
1427 writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
1428 }
1429 break;
1430 case ACB_ADAPTER_TYPE_E:
1431 case ACB_ADAPTER_TYPE_F: {
1432 struct MessageUnit_E __iomem *reg = acb->pmuE;
1433 orig_mask = readl(®->host_int_mask);
1434 writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, ®->host_int_mask);
1435 readl(®->host_int_mask); /* Dummy readl to force pci flush */
1436 }
1437 break;
1438 }
1439 return orig_mask;
1440 }
1441
/*
 * arcmsr_report_ccb_state - translate the firmware's per-CCB status into a
 * SCSI midlayer result and complete the command.
 *
 * On success the device is (re)marked ARECA_RAID_GOOD.  On error the
 * firmware's DeviceStatus selects the mapping: selection timeout ->
 * DID_NO_CONNECT, aborted/init-fail -> DID_BAD_TARGET, check condition ->
 * sense data reported; anything else is logged and treated as gone.
 */
static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb, bool error)
{
	uint8_t id, lun;
	id = ccb->pcmd->device->id;
	lun = ccb->pcmd->device->lun;
	if (!error) {
		if (acb->devstate[id][lun] == ARECA_RAID_GONE)
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
		ccb->pcmd->result = DID_OK << 16;
		arcmsr_ccb_complete(ccb);
	}else{
		switch (ccb->arcmsr_cdb.DeviceStatus) {
		case ARCMSR_DEV_SELECT_TIMEOUT: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_ABORTED:
		/* fallthrough: aborted and init-fail share one mapping */
		case ARCMSR_DEV_INIT_FAIL: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_BAD_TARGET << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_CHECK_CONDITION: {
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
			arcmsr_report_sense_info(ccb);
			arcmsr_ccb_complete(ccb);
			}
			break;

		default:
			printk(KERN_NOTICE
				"arcmsr%d: scsi id = %d lun = %d isr get command error done, \
				but got unknown DeviceStatus = 0x%x \n"
				, acb->host->host_no
				, id
				, lun
				, ccb->arcmsr_cdb.DeviceStatus);
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			break;
		}
	}
}
1493
/*
 * arcmsr_drain_donequeue - process one completed CCB pulled off a done queue.
 *
 * Sanity-checks the CCB before reporting: a CCB that does not belong to this
 * adapter, or is not in the START state, is either a previously-aborted
 * command (completed here with DID_ABORT) or an illegal completion (logged
 * and dropped).  Valid CCBs are forwarded to arcmsr_report_ccb_state().
 */
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
{
	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
		if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
			struct scsi_cmnd *abortcmd = pCCB->pcmd;
			if (abortcmd) {
				abortcmd->result |= DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
					acb->host->host_no, pCCB);
			}
			return;
		}
		/* Completion for a CCB we never posted: log and ignore. */
		printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
				done acb = '0x%p'"
				"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
				" ccboutstandingcount = %d \n"
				, acb->host->host_no
				, acb
				, pCCB
				, pCCB->acb
				, pCCB->startdone
				, atomic_read(&acb->ccboutstandingcount));
		return;
	}
	arcmsr_report_ccb_state(acb, pCCB, error);
}
1521
arcmsr_done4abort_postqueue(struct AdapterControlBlock * acb)1522 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
1523 {
1524 int i = 0;
1525 uint32_t flag_ccb;
1526 struct ARCMSR_CDB *pARCMSR_CDB;
1527 bool error;
1528 struct CommandControlBlock *pCCB;
1529 unsigned long ccb_cdb_phy;
1530
1531 switch (acb->adapter_type) {
1532
1533 case ACB_ADAPTER_TYPE_A: {
1534 struct MessageUnit_A __iomem *reg = acb->pmuA;
1535 uint32_t outbound_intstatus;
1536 outbound_intstatus = readl(®->outbound_intstatus) &
1537 acb->outbound_int_enable;
1538 /*clear and abort all outbound posted Q*/
1539 writel(outbound_intstatus, ®->outbound_intstatus);/*clear interrupt*/
1540 while(((flag_ccb = readl(®->outbound_queueport)) != 0xFFFFFFFF)
1541 && (i++ < acb->maxOutstanding)) {
1542 ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
1543 if (acb->cdb_phyadd_hipart)
1544 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1545 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1546 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1547 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1548 arcmsr_drain_donequeue(acb, pCCB, error);
1549 }
1550 }
1551 break;
1552
1553 case ACB_ADAPTER_TYPE_B: {
1554 struct MessageUnit_B *reg = acb->pmuB;
1555 /*clear all outbound posted Q*/
1556 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
1557 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
1558 flag_ccb = reg->done_qbuffer[i];
1559 if (flag_ccb != 0) {
1560 reg->done_qbuffer[i] = 0;
1561 ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
1562 if (acb->cdb_phyadd_hipart)
1563 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1564 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1565 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1566 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
1567 arcmsr_drain_donequeue(acb, pCCB, error);
1568 }
1569 reg->post_qbuffer[i] = 0;
1570 }
1571 reg->doneq_index = 0;
1572 reg->postq_index = 0;
1573 }
1574 break;
1575 case ACB_ADAPTER_TYPE_C: {
1576 struct MessageUnit_C __iomem *reg = acb->pmuC;
1577 while ((readl(®->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
1578 /*need to do*/
1579 flag_ccb = readl(®->outbound_queueport_low);
1580 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
1581 if (acb->cdb_phyadd_hipart)
1582 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1583 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
1584 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1585 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
1586 arcmsr_drain_donequeue(acb, pCCB, error);
1587 }
1588 }
1589 break;
1590 case ACB_ADAPTER_TYPE_D: {
1591 struct MessageUnit_D *pmu = acb->pmuD;
1592 uint32_t outbound_write_pointer;
1593 uint32_t doneq_index, index_stripped, addressLow, residual, toggle;
1594 unsigned long flags;
1595
1596 residual = atomic_read(&acb->ccboutstandingcount);
1597 for (i = 0; i < residual; i++) {
1598 spin_lock_irqsave(&acb->doneq_lock, flags);
1599 outbound_write_pointer =
1600 pmu->done_qbuffer[0].addressLow + 1;
1601 doneq_index = pmu->doneq_index;
1602 if ((doneq_index & 0xFFF) !=
1603 (outbound_write_pointer & 0xFFF)) {
1604 toggle = doneq_index & 0x4000;
1605 index_stripped = (doneq_index & 0xFFF) + 1;
1606 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
1607 pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
1608 ((toggle ^ 0x4000) + 1);
1609 doneq_index = pmu->doneq_index;
1610 spin_unlock_irqrestore(&acb->doneq_lock, flags);
1611 addressLow = pmu->done_qbuffer[doneq_index &
1612 0xFFF].addressLow;
1613 ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
1614 if (acb->cdb_phyadd_hipart)
1615 ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
1616 pARCMSR_CDB = (struct ARCMSR_CDB *)
1617 (acb->vir2phy_offset + ccb_cdb_phy);
1618 pCCB = container_of(pARCMSR_CDB,
1619 struct CommandControlBlock, arcmsr_cdb);
1620 error = (addressLow &
1621 ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
1622 true : false;
1623 arcmsr_drain_donequeue(acb, pCCB, error);
1624 writel(doneq_index,
1625 pmu->outboundlist_read_pointer);
1626 } else {
1627 spin_unlock_irqrestore(&acb->doneq_lock, flags);
1628 mdelay(10);
1629 }
1630 }
1631 pmu->postq_index = 0;
1632 pmu->doneq_index = 0x40FF;
1633 }
1634 break;
1635 case ACB_ADAPTER_TYPE_E:
1636 arcmsr_hbaE_postqueue_isr(acb);
1637 break;
1638 case ACB_ADAPTER_TYPE_F:
1639 arcmsr_hbaF_postqueue_isr(acb);
1640 break;
1641 }
1642 }
1643
arcmsr_remove_scsi_devices(struct AdapterControlBlock * acb)1644 static void arcmsr_remove_scsi_devices(struct AdapterControlBlock *acb)
1645 {
1646 char *acb_dev_map = (char *)acb->device_map;
1647 int target, lun, i;
1648 struct scsi_device *psdev;
1649 struct CommandControlBlock *ccb;
1650 char temp;
1651
1652 for (i = 0; i < acb->maxFreeCCB; i++) {
1653 ccb = acb->pccb_pool[i];
1654 if (ccb->startdone == ARCMSR_CCB_START) {
1655 ccb->pcmd->result = DID_NO_CONNECT << 16;
1656 scsi_dma_unmap(ccb->pcmd);
1657 scsi_done(ccb->pcmd);
1658 }
1659 }
1660 for (target = 0; target < ARCMSR_MAX_TARGETID; target++) {
1661 temp = *acb_dev_map;
1662 if (temp) {
1663 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
1664 if (temp & 1) {
1665 psdev = scsi_device_lookup(acb->host,
1666 0, target, lun);
1667 if (psdev != NULL) {
1668 scsi_remove_device(psdev);
1669 scsi_device_put(psdev);
1670 }
1671 }
1672 temp >>= 1;
1673 }
1674 *acb_dev_map = 0;
1675 }
1676 acb_dev_map++;
1677 }
1678 }
1679
/*
 * arcmsr_free_pcidev - release all driver resources for a dead adapter.
 *
 * Used on surprise removal (device no longer responds on PCI): unhooks
 * sysfs and the SCSI host, stops workers and timers, frees IRQs, the CCB
 * pool (and the type-F I/O queue), unmaps BARs, and disables the device.
 * Teardown order mirrors arcmsr_remove().
 */
static void arcmsr_free_pcidev(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev;
	struct Scsi_Host *host;

	host = acb->host;
	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	timer_delete_sync(&acb->eternal_timer);
	if (set_date_time)
		timer_delete_sync(&acb->refresh_timer);
	pdev = acb->pdev;
	arcmsr_free_irq(pdev, acb);
	arcmsr_free_ccb_pool(acb);
	/* Type F keeps a separately-allocated I/O queue. */
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
}
1702
/*
 * arcmsr_remove - PCI remove entry point.
 *
 * Detects surprise removal (all-ones device ID readback) and takes the
 * short teardown path.  Otherwise it stops the adapter in order: sysfs,
 * SCSI host, workers, timers, interrupts and background rebuild; polls
 * for outstanding commands to drain, aborting and force-completing any
 * stragglers; then frees IRQs, the CCB pool, BAR mappings and the device.
 */
static void arcmsr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	int poll_count = 0;
	uint16_t dev_id;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
	/* 0xffff readback means the device is already gone from the bus. */
	if (dev_id == 0xffff) {
		acb->acb_flags &= ~ACB_F_IOP_INITED;
		acb->acb_flags |= ACB_F_ADAPTER_REMOVED;
		arcmsr_remove_scsi_devices(acb);
		arcmsr_free_pcidev(acb);
		return;
	}
	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	timer_delete_sync(&acb->eternal_timer);
	if (set_date_time)
		timer_delete_sync(&acb->refresh_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;

	/* Poll-drain outstanding commands; interrupts are masked by now. */
	for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);/* FIXME: need spinlock */
		msleep(25);
	}

	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;

		/* Drain did not finish: abort everything still in flight. */
		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < acb->maxFreeCCB; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
			}
		}
	}
	arcmsr_free_irq(pdev, acb);
	arcmsr_free_ccb_pool(acb);
	/* Type F keeps a separately-allocated I/O queue. */
	if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
		arcmsr_free_io_queue(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
}
1761
/*
 * arcmsr_shutdown - PCI driver .shutdown callback (power-off / kexec)
 * @pdev: the PCI device being shut down
 *
 * Quiesces the controller and flushes its write cache but frees nothing;
 * the system is going down.  Bails out early if the adapter was already
 * surprise-removed (ACB_F_ADAPTER_REMOVED).
 */
static void arcmsr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;
	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
		return;		/* hardware already gone; nothing to touch */
	timer_delete_sync(&acb->eternal_timer);
	if (set_date_time)
		timer_delete_sync(&acb->refresh_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_free_irq(pdev, acb);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
}
1778
arcmsr_module_init(void)1779 static int __init arcmsr_module_init(void)
1780 {
1781 int error = 0;
1782 error = pci_register_driver(&arcmsr_pci_driver);
1783 return error;
1784 }
1785
/*
 * arcmsr_module_exit - module exit point
 *
 * Unregisters the PCI driver; the PCI core invokes the driver's remove
 * callback for each bound device as part of unregistration.
 */
static void __exit arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);
1792
/*
 * arcmsr_enable_outbound_ints - restore controller-to-host interrupts
 * @acb:         adapter control block
 * @intmask_org: mask captured before interrupts were disabled
 *
 * Register polarity differs per message-unit type: for A/C/E/F a SET bit
 * in the mask register blocks that source, so wanted sources are cleared;
 * type B's doorbell mask and type D's enable register appear to use
 * set-to-enable semantics (note the OR instead of AND-NOT) - confirm
 * against the HBB/ARC1214 documentation.
 */
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
		u32 intmask_org)
{
	u32 mask;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/* unmask postqueue, doorbell and message0 sources */
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
			ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		/* remember which sources are live for the ISR */
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
	}
	break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK |
			ARCMSR_IOP2DRV_CDB_DONE |
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(mask, reg->iop2drv_doorbell_mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
	}
	break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		writel(intmask_org & mask, &reg->host_int_mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
	}
	break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;

		/* single global enable word on the ARC-1214 */
		mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
		writel(intmask_org | mask, reg->pcief0_int_enable);
		break;
	}
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		/* E and F share the type E message unit layout */
		struct MessageUnit_E __iomem *reg = acb->pmuE;

		mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
		writel(intmask_org & mask, &reg->host_int_mask);
		break;
	}
	}
}
1843
/*
 * arcmsr_build_ccb - translate a SCSI command into an Areca CDB
 * @acb:  adapter control block
 * @ccb:  driver command control block to fill in
 * @pcmd: midlayer SCSI command
 *
 * Copies the SCSI CDB and builds the scatter/gather list: compact 32-bit
 * SG entries while the DMA address fits in 32 bits, 64-bit entries
 * (tagged with IS_SG64_ADDR in the length word) otherwise.  Returns
 * SUCCESS, or FAILED when DMA mapping fails or produces more segments
 * than the host supports.
 */
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;	/* fixed portion of ARCMSR_CDB before the SG area */
	__le32 length = 0;
	int i;
	struct scatterlist *sg;
	int nseg;
	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->msgContext = 0;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);

	nseg = scsi_dma_map(pcmd);
	/* negative nseg = mapping error; too many segments cannot be encoded */
	if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
		return FAILED;
	scsi_for_each_sg(pcmd, sg, nseg, i) {
		/* Get the physical address of the current data pointer */
		length = cpu_to_le32(sg_dma_len(sg));
		address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
		address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
		if (address_hi == 0) {
			/* address fits in 32 bits: compact entry */
			struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

			pdma_sg->address = address_lo;
			pdma_sg->length = length;
			psge += sizeof (struct SG32ENTRY);
			arccdbsize += sizeof (struct SG32ENTRY);
		} else {
			/* 64-bit address: wide entry, tagged for the firmware */
			struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

			pdma_sg->addresshigh = address_hi;
			pdma_sg->address = address_lo;
			pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
			psge += sizeof (struct SG64ENTRY);
			arccdbsize += sizeof (struct SG64ENTRY);
		}
	}
	arcmsr_cdb->sgcount = (uint8_t)nseg;
	arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
	/* number of 256-byte pages the CDB occupies, rounded up */
	arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
	if ( arccdbsize > 256)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
	ccb->arc_cdb_size = arccdbsize;
	return SUCCESS;
}
1898
/*
 * arcmsr_post_ccb - hand one built CCB to the controller
 * @acb: adapter control block
 * @ccb: command control block (arcmsr_cdb already built)
 *
 * Bumps the outstanding count, marks the CCB started, then posts it via
 * the message-unit-specific inbound queue mechanism.  Types A-D post by
 * physical CDB address; E/F post by SMID.
 */
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		/* low bit tells the IOP whether the SG list is "big" */
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->inbound_queueport);
		else
			writel(cdb_phyaddr, &reg->inbound_queueport);
		break;
	}

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;

		/* zero the following slot so the queue stays terminated */
		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		reg->post_qbuffer[ending_index] = 0;
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			reg->post_qbuffer[index] =
				cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
		} else {
			reg->post_qbuffer[index] = cdb_phyaddr;
		}
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
		reg->postq_index = index;
		/* doorbell tells the IOP a CDB was queued */
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
		uint32_t ccb_post_stamp, arc_cdb_size;

		/* encode (size-1)>>6 into the low bits beside the address */
		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
		writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high);
		writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
	}
	break;
	case ACB_ADAPTER_TYPE_D: {
		/* in-memory inbound SRB ring, serialized by postq_lock */
		struct MessageUnit_D *pmu = acb->pmuD;
		u16 index_stripped;
		u16 postq_index, toggle;
		unsigned long flags;
		struct InBound_SRB *pinbound_srb;

		spin_lock_irqsave(&acb->postq_lock, flags);
		postq_index = pmu->postq_index;
		pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
		pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr);
		pinbound_srb->addressLow = cdb_phyaddr;
		pinbound_srb->length = ccb->arc_cdb_size >> 2;
		arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
		/* bit 0x4000 of postq_index is a wrap toggle - presumed
		 * from the masking; confirm against ARC-1214 docs */
		toggle = postq_index & 0x4000;
		index_stripped = postq_index + 1;
		index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1);
		pmu->postq_index = index_stripped ? (index_stripped | toggle) :
			(toggle ^ 0x4000);
		writel(postq_index, pmu->inboundlist_write_pointer);
		spin_unlock_irqrestore(&acb->postq_lock, flags);
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *pmu = acb->pmuE;
		u32 ccb_post_stamp, arc_cdb_size;

		/* type E posts by SMID, not physical address */
		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6));
		writel(0, &pmu->inbound_queueport_high);
		writel(ccb_post_stamp, &pmu->inbound_queueport_low);
		break;
	}
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_F __iomem *pmu = acb->pmuF;
		u32 ccb_post_stamp, arc_cdb_size;

		/* type F encodes the size field differently above 0x300 */
		if (ccb->arc_cdb_size <= 0x300)
			arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1;
		else {
			arc_cdb_size = ((ccb->arc_cdb_size + 0xff) >> 8) + 2;
			if (arc_cdb_size > 0xF)
				arc_cdb_size = 0xF;
			arc_cdb_size = (arc_cdb_size << 1) | 1;
		}
		ccb_post_stamp = (ccb->smid | arc_cdb_size);
		writel(0, &pmu->inbound_queueport_high);
		writel(ccb_post_stamp, &pmu->inbound_queueport_low);
		break;
	}
	}
}
1997
/*
 * arcmsr_hbaA_stop_bgrb - stop background rebuild on a type A IOP
 *
 * Clears ACB_F_MSG_START_BGRB, posts STOP_BGRB on the inbound message
 * register and polls for the acknowledge; a timeout is only logged.
 */
static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
			, acb->host->host_no);
	}
}
2009
/*
 * arcmsr_hbaB_stop_bgrb - stop background rebuild on a type B IOP
 *
 * Same contract as the type A variant, but the command is sent through
 * the drv2iop doorbell rather than an inbound message register.
 */
static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);

	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
			, acb->host->host_no);
	}
}
2022
arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock * pACB)2023 static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
2024 {
2025 struct MessageUnit_C __iomem *reg = pACB->pmuC;
2026 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
2027 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0);
2028 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
2029 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
2030 printk(KERN_NOTICE
2031 "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
2032 , pACB->host->host_no);
2033 }
2034 return;
2035 }
2036
/*
 * arcmsr_hbaD_stop_bgrb - stop background rebuild on a type D (ARC-1214) IOP
 *
 * Posts STOP_BGRB on the inbound message register and polls for the
 * acknowledge; a timeout is only logged.
 */
static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *reg = pACB->pmuD;

	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
	if (!arcmsr_hbaD_wait_msgint_ready(pACB))
		pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
			"timeout\n", pACB->host->host_no);
}
2047
/*
 * arcmsr_hbaE_stop_bgrb - stop background rebuild on a type E/F IOP
 *
 * Posts STOP_BGRB, then flips the MESSAGE_CMD_DONE toggle bit in the
 * cached out_doorbell value and writes the whole word (E/F doorbells
 * are edge/toggle based), then polls for the acknowledge.
 */
static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *reg = pACB->pmuE;

	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
			"timeout\n", pACB->host->host_no);
	}
}
2061
arcmsr_stop_adapter_bgrb(struct AdapterControlBlock * acb)2062 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
2063 {
2064 switch (acb->adapter_type) {
2065 case ACB_ADAPTER_TYPE_A:
2066 arcmsr_hbaA_stop_bgrb(acb);
2067 break;
2068 case ACB_ADAPTER_TYPE_B:
2069 arcmsr_hbaB_stop_bgrb(acb);
2070 break;
2071 case ACB_ADAPTER_TYPE_C:
2072 arcmsr_hbaC_stop_bgrb(acb);
2073 break;
2074 case ACB_ADAPTER_TYPE_D:
2075 arcmsr_hbaD_stop_bgrb(acb);
2076 break;
2077 case ACB_ADAPTER_TYPE_E:
2078 case ACB_ADAPTER_TYPE_F:
2079 arcmsr_hbaE_stop_bgrb(acb);
2080 break;
2081 }
2082 }
2083
/*
 * arcmsr_free_ccb_pool - release the adapter's coherent DMA memory
 *
 * When xor_mega != 0, first frees the per-segment XOR buffers
 * (presumably parity scratch space - confirm).  The layout visible
 * here: xorVirt starts with a HostRamBuf header, followed by a Xor_sg
 * table of physical addresses, with the matching virtual-pointer table
 * at offset xorVirtOffset.  Finally frees the single coherent region
 * backing the CCB pool.
 */
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
	if (acb->xor_mega) {
		struct Xor_sg *pXorPhys;
		void **pXorVirt;
		int i;

		pXorPhys = (struct Xor_sg *)(acb->xorVirt +
			sizeof(struct HostRamBuf));
		pXorVirt = (void **)((unsigned long)acb->xorVirt +
			(unsigned long)acb->xorVirtOffset);
		for (i = 0; i < acb->xor_mega; i++) {
			if (pXorPhys->xorPhys) {
				dma_free_coherent(&acb->pdev->dev,
					ARCMSR_XOR_SEG_SIZE,
					*pXorVirt, pXorPhys->xorPhys);
				pXorPhys->xorPhys = 0;	/* mark slot freed */
				*pXorVirt = NULL;
			}
			pXorPhys++;
			pXorVirt++;
		}
		dma_free_coherent(&acb->pdev->dev, acb->init2cfg_size,
			acb->xorVirt, acb->xorPhys);
	}
	dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
}
2111
/*
 * arcmsr_iop_message_read - ack "driver has read the IOP's data buffer"
 *
 * Rings the adapter-type-specific inbound doorbell so the IOP may post
 * the next Qbuffer.  Types E/F use a toggle scheme: the relevant bit in
 * the cached out_doorbell is flipped and the whole word rewritten.
 */
static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
			reg->inbound_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
	}
	break;
	}
}
2146
/*
 * arcmsr_iop_message_wrote - ack "driver has written data for the IOP"
 *
 * Counterpart of arcmsr_iop_message_read(): rings the inbound doorbell
 * to tell the IOP that fresh ioctl data is waiting in the write Qbuffer.
 * The reply arrives on a later hardware interrupt.  Types E/F toggle a
 * bit in the cached out_doorbell word.
 */
static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
	}
	break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY,
			reg->inbound_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
	}
	break;
	}
}
2193
arcmsr_get_iop_rqbuffer(struct AdapterControlBlock * acb)2194 struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
2195 {
2196 struct QBUFFER __iomem *qbuffer = NULL;
2197 switch (acb->adapter_type) {
2198
2199 case ACB_ADAPTER_TYPE_A: {
2200 struct MessageUnit_A __iomem *reg = acb->pmuA;
2201 qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer;
2202 }
2203 break;
2204 case ACB_ADAPTER_TYPE_B: {
2205 struct MessageUnit_B *reg = acb->pmuB;
2206 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
2207 }
2208 break;
2209 case ACB_ADAPTER_TYPE_C: {
2210 struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
2211 qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
2212 }
2213 break;
2214 case ACB_ADAPTER_TYPE_D: {
2215 struct MessageUnit_D *reg = acb->pmuD;
2216 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
2217 }
2218 break;
2219 case ACB_ADAPTER_TYPE_E: {
2220 struct MessageUnit_E __iomem *reg = acb->pmuE;
2221 qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer;
2222 }
2223 break;
2224 case ACB_ADAPTER_TYPE_F: {
2225 qbuffer = (struct QBUFFER __iomem *)acb->message_rbuffer;
2226 }
2227 break;
2228 }
2229 return qbuffer;
2230 }
2231
arcmsr_get_iop_wqbuffer(struct AdapterControlBlock * acb)2232 static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
2233 {
2234 struct QBUFFER __iomem *pqbuffer = NULL;
2235 switch (acb->adapter_type) {
2236
2237 case ACB_ADAPTER_TYPE_A: {
2238 struct MessageUnit_A __iomem *reg = acb->pmuA;
2239 pqbuffer = (struct QBUFFER __iomem *) ®->message_wbuffer;
2240 }
2241 break;
2242 case ACB_ADAPTER_TYPE_B: {
2243 struct MessageUnit_B *reg = acb->pmuB;
2244 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
2245 }
2246 break;
2247 case ACB_ADAPTER_TYPE_C: {
2248 struct MessageUnit_C __iomem *reg = acb->pmuC;
2249 pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer;
2250 }
2251 break;
2252 case ACB_ADAPTER_TYPE_D: {
2253 struct MessageUnit_D *reg = acb->pmuD;
2254 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
2255 }
2256 break;
2257 case ACB_ADAPTER_TYPE_E: {
2258 struct MessageUnit_E __iomem *reg = acb->pmuE;
2259 pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer;
2260 }
2261 break;
2262 case ACB_ADAPTER_TYPE_F:
2263 pqbuffer = (struct QBUFFER __iomem *)acb->message_wbuffer;
2264 break;
2265 }
2266 return pqbuffer;
2267 }
2268
2269 static uint32_t
arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock * acb,struct QBUFFER __iomem * prbuffer)2270 arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb,
2271 struct QBUFFER __iomem *prbuffer)
2272 {
2273 uint8_t *pQbuffer;
2274 uint8_t *buf1 = NULL;
2275 uint32_t __iomem *iop_data;
2276 uint32_t iop_len, data_len, *buf2 = NULL;
2277
2278 iop_data = (uint32_t __iomem *)prbuffer->data;
2279 iop_len = readl(&prbuffer->data_len);
2280 if (iop_len > 0) {
2281 buf1 = kmalloc(128, GFP_ATOMIC);
2282 buf2 = (uint32_t *)buf1;
2283 if (buf1 == NULL)
2284 return 0;
2285 data_len = iop_len;
2286 while (data_len >= 4) {
2287 *buf2++ = readl(iop_data);
2288 iop_data++;
2289 data_len -= 4;
2290 }
2291 if (data_len)
2292 *buf2 = readl(iop_data);
2293 buf2 = (uint32_t *)buf1;
2294 }
2295 while (iop_len > 0) {
2296 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
2297 *pQbuffer = *buf1;
2298 acb->rqbuf_putIndex++;
2299 /* if last, index number set it to 0 */
2300 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
2301 buf1++;
2302 iop_len--;
2303 }
2304 kfree(buf2);
2305 /* let IOP know data has been read */
2306 arcmsr_iop_message_read(acb);
2307 return 1;
2308 }
2309
/*
 * arcmsr_Read_iop_rqbuffer_data - copy IOP ioctl data into the rqbuffer ring
 * @acb:      adapter control block
 * @prbuffer: controller-side Qbuffer (MMIO)
 *
 * Called with acb->rqbuffer_lock held (see
 * arcmsr_iop2drv_data_wrote_handle()).  Message-unit types newer than B
 * require 32-bit MMIO accesses and are delegated to the DWORD variant;
 * older types are read byte-wise here.  Acknowledges the IOP afterwards.
 * Returns 1, or 0 only if the DWORD path fails to allocate its bounce
 * buffer.
 */
uint32_t
arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
	struct QBUFFER __iomem *prbuffer) {

	uint8_t *pQbuffer;
	uint8_t __iomem *iop_data;
	uint32_t iop_len;

	/* types newer than B must be read in 32-bit units */
	if (acb->adapter_type > ACB_ADAPTER_TYPE_B)
		return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
	iop_data = (uint8_t __iomem *)prbuffer->data;
	iop_len = readl(&prbuffer->data_len);
	while (iop_len > 0) {
		pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
		*pQbuffer = readb(iop_data);
		acb->rqbuf_putIndex++;
		acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;	/* wrap the ring */
		iop_data++;
		iop_len--;
	}
	/* let the IOP know its buffer has been drained */
	arcmsr_iop_message_read(acb);
	return 1;
}
2333
/*
 * arcmsr_iop2drv_data_wrote_handle - IOP signalled "ioctl data written"
 *
 * Computes the free space left in the circular rqbuffer; if the incoming
 * chunk fits, pulls it in under rqbuffer_lock.  Otherwise (or if the
 * read fails) sets ACB_F_IOPDATA_OVERFLOW and leaves the data
 * unacknowledged in the IOP until the ring is drained.
 */
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
	unsigned long flags;
	struct QBUFFER __iomem *prbuffer;
	int32_t buf_empty_len;

	spin_lock_irqsave(&acb->rqbuffer_lock, flags);
	prbuffer = arcmsr_get_iop_rqbuffer(acb);
	/* classic ring free-space computation; one slot is kept empty */
	if (acb->rqbuf_putIndex >= acb->rqbuf_getIndex) {
		buf_empty_len = (ARCMSR_MAX_QBUFFER - 1) -
			(acb->rqbuf_putIndex - acb->rqbuf_getIndex);
	} else
		buf_empty_len = acb->rqbuf_getIndex - acb->rqbuf_putIndex - 1;
	if (buf_empty_len >= readl(&prbuffer->data_len)) {
		if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
			acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	} else
		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
}
2354
/*
 * arcmsr_write_ioctldata2iop_in_DWORD - push ioctl data with 32-bit writes
 *
 * Variant of arcmsr_write_ioctldata2iop() for message units that require
 * 32-bit MMIO access: gathers up to 124 pending bytes from the wqbuffer
 * ring into a kmalloc'd bounce buffer, then writes them to the IOP
 * Qbuffer with writel().  Only runs when the IOP has acknowledged the
 * previous chunk (ACB_F_MESSAGE_WQBUFFER_READED); that flag is cleared
 * until the next acknowledge arrives.
 */
static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb)
{
	uint8_t *pQbuffer;
	struct QBUFFER __iomem *pwbuffer;
	uint8_t *buf1 = NULL;
	uint32_t __iomem *iop_data;
	uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data;

	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		buf1 = kmalloc(128, GFP_ATOMIC);
		buf2 = (uint32_t *)buf1;	/* buf2 keeps the buffer start */
		if (buf1 == NULL)
			return;

		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint32_t __iomem *)pwbuffer->data;
		/* drain ring bytes into the bounce buffer (124 max) */
		while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
			&& (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
			*buf1 = *pQbuffer;
			acb->wqbuf_getIndex++;
			acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
			buf1++;
			allxfer_len++;
		}
		/* rewind buf1 to the buffer start via buf2, then emit DWORDs */
		data_len = allxfer_len;
		buf1 = (uint8_t *)buf2;
		while (data_len >= 4) {
			data = *buf2++;
			writel(data, iop_data);
			iop_data++;
			data_len -= 4;
		}
		if (data_len) {
			/* trailing 1-3 bytes; padding in the last word is
			 * presumably ignored by the IOP via data_len */
			data = *buf2;
			writel(data, iop_data);
		}
		writel(allxfer_len, &pwbuffer->data_len);
		kfree(buf1);	/* buf1 was reset to the allocation start above */
		arcmsr_iop_message_wrote(acb);
	}
}
2398
/*
 * arcmsr_write_ioctldata2iop - push pending ioctl data to the IOP
 *
 * Copies up to 124 bytes from the driver's circular wqbuffer into the
 * controller's write Qbuffer, then rings the doorbell.  Message-unit
 * types newer than B are delegated to the DWORD variant.  The visible
 * caller (arcmsr_iop2drv_data_read_handle()) holds acb->wqbuffer_lock;
 * other callers are expected to do the same.  Nothing is sent until the
 * IOP has acknowledged the previous chunk (ACB_F_MESSAGE_WQBUFFER_READED).
 */
void
arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
{
	uint8_t *pQbuffer;
	struct QBUFFER __iomem *pwbuffer;
	uint8_t __iomem *iop_data;
	int32_t allxfer_len = 0;

	if (acb->adapter_type > ACB_ADAPTER_TYPE_B) {
		arcmsr_write_ioctldata2iop_in_DWORD(acb);
		return;
	}
	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint8_t __iomem *)pwbuffer->data;
		/* byte-wise copy, capped at 124 bytes per transfer */
		while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
			&& (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
			writeb(*pQbuffer, iop_data);
			acb->wqbuf_getIndex++;
			acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		writel(allxfer_len, &pwbuffer->data_len);
		arcmsr_iop_message_wrote(acb);
	}
}
2428
/*
 * arcmsr_iop2drv_data_read_handle - IOP acknowledged our last write
 *
 * Marks the write Qbuffer free again, sends the next pending chunk if
 * the ring is non-empty, and flags the ring as fully drained
 * (ACB_F_MESSAGE_WQBUFFER_CLEARED) once get == put.
 */
static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
	unsigned long flags;

	spin_lock_irqsave(&acb->wqbuffer_lock, flags);
	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
	if (acb->wqbuf_getIndex != acb->wqbuf_putIndex)
		arcmsr_write_ioctldata2iop(acb);
	if (acb->wqbuf_getIndex == acb->wqbuf_putIndex)
		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
	spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
}
2441
/*
 * arcmsr_hbaA_doorbell_isr - service type A doorbell events (ioctl traffic)
 *
 * Writing the cause bits back to outbound_doorbell appears to acknowledge
 * them (write-one-to-clear semantics - confirm against IOP331 docs).  The
 * register is re-read after handling and the loop repeats so a doorbell
 * raised while we were busy is not lost.
 */
static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	outbound_doorbell = readl(&reg->outbound_doorbell);
	do {
		writel(outbound_doorbell, &reg->outbound_doorbell);	/* ack */
		if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(acb);
		if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(acb);
		outbound_doorbell = readl(&reg->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK
		| ARCMSR_OUTBOUND_IOP331_DATA_READ_OK));
}
/*
 * arcmsr_hbaC_doorbell_isr - service type C doorbell events
 *
 * Handles ioctl data traffic and message-command completion.  Causes are
 * acknowledged through the separate clear register; the immediate
 * read-back is presumably there to flush the posted write before the
 * handlers run - confirm against HBC documentation.  The loop repeats
 * until no serviced cause remains so nothing raised mid-handler is lost.
 */
static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_C __iomem *reg = pACB->pmuC;

	outbound_doorbell = readl(&reg->outbound_doorbell);
	do {
		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
		readl(&reg->outbound_doorbell_clear);	/* flush posted write */
		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(pACB);
		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(pACB);
		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaC_message_isr(pACB);
		outbound_doorbell = readl(&reg->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK
		| ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE));
}
2483
/*
 * arcmsr_hbaD_doorbell_isr - service type D (ARC-1214) doorbell events
 *
 * Same pattern as type A: write the cause bits back to acknowledge,
 * handle message-command completion and ioctl data traffic, then re-read
 * and repeat so nothing raised mid-handler is lost.
 */
static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_D *pmu = pACB->pmuD;

	outbound_doorbell = readl(pmu->outbound_doorbell);
	do {
		writel(outbound_doorbell, pmu->outbound_doorbell);	/* ack */
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaD_message_isr(pACB);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(pACB);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(pACB);
		outbound_doorbell = readl(pmu->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK
		| ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
}
2503
/*
 * arcmsr_hbaE_doorbell_isr - service type E/F doorbell events
 *
 * Types E/F have no write-to-clear cause register: the doorbell word is
 * toggle-based.  XORing the current value with the last snapshot
 * (pACB->in_doorbell) yields the bits that flipped, i.e. the new events.
 * On type F the register is re-read up to 5 times while it reads zero -
 * presumably working around a hardware read quirk; TODO confirm.
 */
static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell, in_doorbell, tmp, i;
	struct MessageUnit_E __iomem *reg = pACB->pmuE;

	if (pACB->adapter_type == ACB_ADAPTER_TYPE_F) {
		for (i = 0; i < 5; i++) {
			in_doorbell = readl(&reg->iobound_doorbell);
			if (in_doorbell != 0)
				break;
		}
	} else
		in_doorbell = readl(&reg->iobound_doorbell);
	outbound_doorbell = in_doorbell ^ pACB->in_doorbell;	/* changed bits */
	do {
		writel(0, &reg->host_int_status); /* clear interrupt */
		if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
			arcmsr_iop2drv_data_wrote_handle(pACB);
		}
		if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) {
			arcmsr_iop2drv_data_read_handle(pACB);
		}
		if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
			arcmsr_hbaE_message_isr(pACB);
		}
		tmp = in_doorbell;
		in_doorbell = readl(&reg->iobound_doorbell);
		outbound_doorbell = tmp ^ in_doorbell;	/* bits flipped meanwhile */
	} while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK
		| ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE));
	pACB->in_doorbell = in_doorbell;	/* snapshot for the next IRQ */
}
2537
/*
 * arcmsr_hbaA_postqueue_isr - drain completed commands, type A
 *
 * Each outbound queue entry packs the CDB's physical address >> 5 plus
 * status flags; 0xFFFFFFFF means the queue is empty.  The low address is
 * recovered by the shift and, when cdb_phyadd_hipart is set, combined
 * with the constant high 32 bits, then translated back to the virtual
 * CCB via vir2phy_offset and completed through arcmsr_drain_donequeue().
 */
static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t flag_ccb;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;
	unsigned long cdb_phy_addr;

	while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
		cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
	}
}
/*
 * arcmsr_hbaB_postqueue_isr - drain completed commands, type B
 *
 * Like the type A variant, but completions arrive in the in-memory
 * done_qbuffer ring (a zero entry means empty).  Each consumed slot is
 * zeroed and doneq_index advanced with wraparound.
 */
static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t index;
	uint32_t flag_ccb;
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;
	unsigned long cdb_phy_addr;

	index = reg->doneq_index;
	while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
		/* entry packs CDB physical address >> 5 plus status flags */
		cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
		reg->done_qbuffer[index] = 0;	/* mark slot consumed */
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;	/* wrap */
		reg->doneq_index = index;
	}
}
2582
/*
 * arcmsr_hbaC_postqueue_isr - drain completed commands, type C
 *
 * Entries pack the 16-byte-aligned CDB physical address in the high bits
 * (mask 0xFFFFFFF0) plus an error flag; 0xFFFFFFFF means empty.  Every
 * ARCMSR_HBC_ISR_THROTTLING_LEVEL completions the inbound doorbell is
 * rung with the POSTQUEUE_THROTTLING code.
 */
static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C __iomem *phbcmu;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, throttling = 0;
	unsigned long ccb_cdb_phy;
	int error;

	phbcmu = acb->pmuC;
	/* areca cdb command done */
	/* Use correct offset and size for syncing */

	while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
			0xFFFFFFFF) {
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
			+ ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
			arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
			? true : false;
		/* check if command done with no error */
		arcmsr_drain_donequeue(acb, ccb, error);
		throttling++;
		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
			writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING,
				&phbcmu->inbound_doorbell);
			throttling = 0;
		}
	}
}
2617
/*
** Drain the Type D (ARC1214) adapter's outbound done queue under
** doneq_lock.  Slot 0 of done_qbuffer holds the IOP's write pointer;
** the driver walks its own doneq_index toward it.  Bit 0x4000 of the
** index is a wrap "toggle" bit kept across wrap-around of the 0xFFF
** index field.
*/
static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
{
	u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
	uint32_t addressLow;
	int error;
	struct MessageUnit_D *pmu;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	unsigned long flags, ccb_cdb_phy;

	spin_lock_irqsave(&acb->doneq_lock, flags);
	pmu = acb->pmuD;
	outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
	doneq_index = pmu->doneq_index;
	if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) {
		do {
			/* advance index, preserving the wrap toggle bit */
			toggle = doneq_index & 0x4000;
			index_stripped = (doneq_index & 0xFFF) + 1;
			index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
			pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
				((toggle ^ 0x4000) + 1);
			doneq_index = pmu->doneq_index;
			addressLow = pmu->done_qbuffer[doneq_index &
				0xFFF].addressLow;
			/* low 4 bits carry status; mask to get the CDB bus address */
			ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
			if (acb->cdb_phyadd_hipart)
				ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
			arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
				+ ccb_cdb_phy);
			ccb = container_of(arcmsr_cdb,
				struct CommandControlBlock, arcmsr_cdb);
			error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
				? true : false;
			arcmsr_drain_donequeue(acb, ccb, error);
			/* publish consumed position back to the IOP */
			writel(doneq_index, pmu->outboundlist_read_pointer);
		} while ((doneq_index & 0xFFF) !=
			(outbound_write_pointer & 0xFFF));
	}
	writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR,
		pmu->outboundlist_interrupt_cause);
	/* read back to flush the interrupt-clear write */
	readl(pmu->outboundlist_interrupt_cause);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
2661
/*
** Drain the Type E adapter's completion queue under doneq_lock.  The
** IOP advances reply_post_producer_index; each completion entry carries
** the SMID of the finished CCB plus error flags.  The consumed index is
** written back to reply_post_consumer_index when done.
*/
static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t doneq_index;
	uint16_t cmdSMID;
	int error;
	struct MessageUnit_E __iomem *pmu;
	struct CommandControlBlock *ccb;
	unsigned long flags;

	spin_lock_irqsave(&acb->doneq_lock, flags);
	doneq_index = acb->doneq_index;
	pmu = acb->pmuE;
	while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) {
		/* look up the completed CCB by its SMID */
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		ccb = acb->pccb_pool[cmdSMID];
		error = (acb->pCompletionQ[doneq_index].cmdFlag
			& ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_drain_donequeue(acb, ccb, error);
		doneq_index++;
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;
	}
	acb->doneq_index = doneq_index;
	/* tell the IOP how far we have consumed */
	writel(doneq_index, &pmu->reply_post_consumer_index);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
2688
/*
** Drain the Type F adapter's completion queue under doneq_lock.  Unlike
** Type E there is no producer-index register to poll: a slot whose
** cmdSMID is 0xffff is empty, and each consumed slot is reset to 0xffff
** before advancing.
*/
static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t doneq_index;
	uint16_t cmdSMID;
	int error;
	struct MessageUnit_F __iomem *phbcmu;
	struct CommandControlBlock *ccb;
	unsigned long flags;

	spin_lock_irqsave(&acb->doneq_lock, flags);
	doneq_index = acb->doneq_index;
	phbcmu = acb->pmuF;
	while (1) {
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		if (cmdSMID == 0xffff)
			break;	/* sentinel: queue slot not yet filled by IOP */
		ccb = acb->pccb_pool[cmdSMID];
		error = (acb->pCompletionQ[doneq_index].cmdFlag &
			ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_drain_donequeue(acb, ccb, error);
		/* mark the slot free again for the IOP */
		acb->pCompletionQ[doneq_index].cmdSMID = 0xffff;
		doneq_index++;
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;
	}
	acb->doneq_index = doneq_index;
	writel(doneq_index, &phbcmu->reply_post_consumer_index);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
2718
2719 /*
2720 **********************************************************************************
2721 ** Handle a message interrupt
2722 **
2723 ** The only message interrupt we expect is in response to a query for the current adapter config.
2724 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
2725 **********************************************************************************
2726 */
arcmsr_hbaA_message_isr(struct AdapterControlBlock * acb)2727 static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
2728 {
2729 struct MessageUnit_A __iomem *reg = acb->pmuA;
2730 /*clear interrupt and message state*/
2731 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, ®->outbound_intstatus);
2732 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2733 schedule_work(&acb->arcmsr_do_message_isr_bh);
2734 }
/*
** Type B message interrupt: acknowledge the iop2drv doorbell and, if a
** get-config request is outstanding, defer processing to the
** bottom-half work item.
*/
static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	/*clear interrupt and message state*/
	writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
		schedule_work(&acb->arcmsr_do_message_isr_bh);
}
2744 /*
2745 **********************************************************************************
2746 ** Handle a message interrupt
2747 **
2748 ** The only message interrupt we expect is in response to a query for the
2749 ** current adapter config.
2750 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
2751 **********************************************************************************
2752 */
arcmsr_hbaC_message_isr(struct AdapterControlBlock * acb)2753 static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
2754 {
2755 struct MessageUnit_C __iomem *reg = acb->pmuC;
2756 /*clear interrupt and message state*/
2757 writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, ®->outbound_doorbell_clear);
2758 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2759 schedule_work(&acb->arcmsr_do_message_isr_bh);
2760 }
2761
/*
** Type D message interrupt: acknowledge the message-done doorbell
** (read-back flushes the posted write) and, if a get-config request is
** outstanding, defer processing to the bottom-half work item.
*/
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg = acb->pmuD;

	writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
	/* read back to flush the doorbell-clear write */
	readl(reg->outbound_doorbell);
	if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
		schedule_work(&acb->arcmsr_do_message_isr_bh);
}
2771
arcmsr_hbaE_message_isr(struct AdapterControlBlock * acb)2772 static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb)
2773 {
2774 struct MessageUnit_E __iomem *reg = acb->pmuE;
2775
2776 writel(0, ®->host_int_status);
2777 if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
2778 schedule_work(&acb->arcmsr_do_message_isr_bh);
2779 }
2780
arcmsr_hbaA_handle_isr(struct AdapterControlBlock * acb)2781 static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
2782 {
2783 uint32_t outbound_intstatus;
2784 struct MessageUnit_A __iomem *reg = acb->pmuA;
2785 outbound_intstatus = readl(®->outbound_intstatus) &
2786 acb->outbound_int_enable;
2787 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
2788 return IRQ_NONE;
2789 do {
2790 writel(outbound_intstatus, ®->outbound_intstatus);
2791 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
2792 arcmsr_hbaA_doorbell_isr(acb);
2793 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
2794 arcmsr_hbaA_postqueue_isr(acb);
2795 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT)
2796 arcmsr_hbaA_message_isr(acb);
2797 outbound_intstatus = readl(®->outbound_intstatus) &
2798 acb->outbound_int_enable;
2799 } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT
2800 | ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
2801 | ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
2802 return IRQ_HANDLED;
2803 }
2804
arcmsr_hbaB_handle_isr(struct AdapterControlBlock * acb)2805 static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
2806 {
2807 uint32_t outbound_doorbell;
2808 struct MessageUnit_B *reg = acb->pmuB;
2809 outbound_doorbell = readl(reg->iop2drv_doorbell) &
2810 acb->outbound_int_enable;
2811 if (!outbound_doorbell)
2812 return IRQ_NONE;
2813 do {
2814 writel(~outbound_doorbell, reg->iop2drv_doorbell);
2815 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
2816 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
2817 arcmsr_iop2drv_data_wrote_handle(acb);
2818 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
2819 arcmsr_iop2drv_data_read_handle(acb);
2820 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
2821 arcmsr_hbaB_postqueue_isr(acb);
2822 if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
2823 arcmsr_hbaB_message_isr(acb);
2824 outbound_doorbell = readl(reg->iop2drv_doorbell) &
2825 acb->outbound_int_enable;
2826 } while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
2827 | ARCMSR_IOP2DRV_DATA_READ_OK
2828 | ARCMSR_IOP2DRV_CDB_DONE
2829 | ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
2830 return IRQ_HANDLED;
2831 }
2832
arcmsr_hbaC_handle_isr(struct AdapterControlBlock * pACB)2833 static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
2834 {
2835 uint32_t host_interrupt_status;
2836 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
2837 /*
2838 *********************************************
2839 ** check outbound intstatus
2840 *********************************************
2841 */
2842 host_interrupt_status = readl(&phbcmu->host_int_status) &
2843 (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
2844 ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
2845 if (!host_interrupt_status)
2846 return IRQ_NONE;
2847 do {
2848 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
2849 arcmsr_hbaC_doorbell_isr(pACB);
2850 /* MU post queue interrupts*/
2851 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
2852 arcmsr_hbaC_postqueue_isr(pACB);
2853 host_interrupt_status = readl(&phbcmu->host_int_status);
2854 } while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
2855 ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
2856 return IRQ_HANDLED;
2857 }
2858
/*
** Top-level Type D (ARC1214) interrupt handler: dispatch post-queue and
** doorbell interrupts, re-reading host_int_status until no source
** remains.  Returns IRQ_NONE if the interrupt was not ours.
*/
static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
{
	u32 host_interrupt_status;
	struct MessageUnit_D *pmu = pACB->pmuD;

	host_interrupt_status = readl(pmu->host_int_status) &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU post queue interrupts*/
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaD_postqueue_isr(pACB);
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaD_doorbell_isr(pACB);
		host_interrupt_status = readl(pmu->host_int_status);
	} while (host_interrupt_status &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
2883
/*
** Top-level Type E interrupt handler: dispatch doorbell and post-queue
** interrupts, re-reading host_int_status until no source remains.
** Returns IRQ_NONE if the interrupt was not ours.
*/
static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_E __iomem *pmu = pACB->pmuE;

	host_interrupt_status = readl(&pmu->host_int_status) &
		(ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU ioctl transfer doorbell interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
			arcmsr_hbaE_doorbell_isr(pACB);
		}
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
			arcmsr_hbaE_postqueue_isr(pACB);
		}
		host_interrupt_status = readl(&pmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
2908
/*
** Top-level Type F interrupt handler.  Type F shares the Type E
** doorbell interface: it uses the HBEMU interrupt masks and reuses
** arcmsr_hbaE_doorbell_isr() for doorbell events, but has its own
** post-queue drain.  Returns IRQ_NONE if the interrupt was not ours.
*/
static irqreturn_t arcmsr_hbaF_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_F __iomem *phbcmu = pACB->pmuF;

	host_interrupt_status = readl(&phbcmu->host_int_status) &
		(ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaF_postqueue_isr(pACB);

		/* MU ioctl transfer doorbell interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaE_doorbell_isr(pACB);

		host_interrupt_status = readl(&phbcmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
2933
arcmsr_interrupt(struct AdapterControlBlock * acb)2934 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
2935 {
2936 switch (acb->adapter_type) {
2937 case ACB_ADAPTER_TYPE_A:
2938 return arcmsr_hbaA_handle_isr(acb);
2939 case ACB_ADAPTER_TYPE_B:
2940 return arcmsr_hbaB_handle_isr(acb);
2941 case ACB_ADAPTER_TYPE_C:
2942 return arcmsr_hbaC_handle_isr(acb);
2943 case ACB_ADAPTER_TYPE_D:
2944 return arcmsr_hbaD_handle_isr(acb);
2945 case ACB_ADAPTER_TYPE_E:
2946 return arcmsr_hbaE_handle_isr(acb);
2947 case ACB_ADAPTER_TYPE_F:
2948 return arcmsr_hbaF_handle_isr(acb);
2949 default:
2950 return IRQ_NONE;
2951 }
2952 }
2953
arcmsr_iop_parking(struct AdapterControlBlock * acb)2954 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
2955 {
2956 if (acb) {
2957 /* stop adapter background rebuild */
2958 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
2959 uint32_t intmask_org;
2960 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
2961 intmask_org = arcmsr_disable_outbound_ints(acb);
2962 arcmsr_stop_adapter_bgrb(acb);
2963 arcmsr_flush_adapter_cache(acb);
2964 arcmsr_enable_outbound_ints(acb, intmask_org);
2965 }
2966 }
2967 }
2968
2969
arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock * acb)2970 void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
2971 {
2972 uint32_t i;
2973
2974 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2975 for (i = 0; i < 15; i++) {
2976 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2977 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2978 acb->rqbuf_getIndex = 0;
2979 acb->rqbuf_putIndex = 0;
2980 arcmsr_iop_message_read(acb);
2981 mdelay(30);
2982 } else if (acb->rqbuf_getIndex !=
2983 acb->rqbuf_putIndex) {
2984 acb->rqbuf_getIndex = 0;
2985 acb->rqbuf_putIndex = 0;
2986 mdelay(30);
2987 } else
2988 break;
2989 }
2990 }
2991 }
2992
/*
** Service a READ_BUFFER/WRITE_BUFFER SCSI command addressed to the
** driver's virtual device.  CDB bytes 5-8 select a message control
** code; the command's single scatter-gather segment carries a
** CMD_MESSAGE_FIELD that is exchanged with the driver's rqbuffer /
** wqbuffer ioctl rings under their respective spinlocks.
** Returns 0 on success or ARCMSR_MESSAGE_FAIL.
*/
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
		struct scsi_cmnd *cmd)
{
	char *buffer;
	unsigned short use_sg;
	int retvalue = 0, transfer_len = 0;
	unsigned long flags;
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
		(uint32_t)cmd->cmnd[6] << 16 |
		(uint32_t)cmd->cmnd[7] << 8 |
		(uint32_t)cmd->cmnd[8];
	struct scatterlist *sg;

	use_sg = scsi_sg_count(cmd);
	sg = scsi_sglist(cmd);
	/* map the first S/G segment; unmapped at message_out */
	buffer = kmap_atomic(sg_page(sg)) + sg->offset;
	if (use_sg > 1) {
		/* message payload must fit in a single segment */
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	transfer_len += sg->length;
	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
		goto message_out;
	}
	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
	switch (controlcode) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
		/* copy queued IOP->driver ioctl data out to the caller */
		unsigned char *ver_addr;
		uint8_t *ptmpQbuffer;
		uint32_t allxfer_len = 0;
		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			pr_info("%s: memory not enough!\n", __func__);
			goto message_out;
		}
		ptmpQbuffer = ver_addr;
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
			unsigned int tail = acb->rqbuf_getIndex;
			unsigned int head = acb->rqbuf_putIndex;
			unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);

			allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
			if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
				allxfer_len = ARCMSR_API_DATA_BUFLEN;

			/* ring may wrap: copy in up to two chunks */
			if (allxfer_len <= cnt_to_end)
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
			else {
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
				memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
			}
			acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
		}
		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
			allxfer_len);
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			struct QBUFFER __iomem *prbuffer;
			/* ring had overflowed; try to pull pending IOP data now */
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
				acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
		}
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		kfree(ver_addr);
		pcmdmessagefld->cmdmessage.Length = allxfer_len;
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
		/* queue caller data into the driver->IOP ring and kick the IOP */
		unsigned char *ver_addr;
		uint32_t user_len;
		int32_t cnt2end;
		uint8_t *pQbuffer, *ptmpuserbuffer;

		user_len = pcmdmessagefld->cmdmessage.Length;
		if (user_len > ARCMSR_API_DATA_BUFLEN) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}

		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		ptmpuserbuffer = ver_addr;

		memcpy(ptmpuserbuffer,
			pcmdmessagefld->messagedatabuffer, user_len);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
			/* previous write still pending: fail with sense data */
			struct SENSE_DATA *sensebuffer =
				(struct SENSE_DATA *)cmd->sense_buffer;
			arcmsr_write_ioctldata2iop(acb);
			/* has error report sensedata */
			sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
			sensebuffer->SenseKey = ILLEGAL_REQUEST;
			sensebuffer->AdditionalSenseLength = 0x0A;
			sensebuffer->AdditionalSenseCode = 0x20;
			sensebuffer->Valid = 1;
			retvalue = ARCMSR_MESSAGE_FAIL;
		} else {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
			cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
			/* ring may wrap: copy in up to two chunks */
			if (user_len > cnt2end) {
				memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
				ptmpuserbuffer += cnt2end;
				user_len -= cnt2end;
				acb->wqbuf_putIndex = 0;
				pQbuffer = acb->wqbuffer;
			}
			memcpy(pQbuffer, ptmpuserbuffer, user_len);
			acb->wqbuf_putIndex += user_len;
			acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
			if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
				acb->acb_flags &=
					~ACB_F_MESSAGE_WQBUFFER_CLEARED;
				arcmsr_write_ioctldata2iop(acb);
			}
		}
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		kfree(ver_addr);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
		/* discard everything queued from the IOP */
		uint8_t *pQbuffer = acb->rqbuffer;

		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
		/* discard everything queued toward the IOP */
		uint8_t *pQbuffer = acb->wqbuffer;
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
		/* discard both rings */
		uint8_t *pQbuffer;
		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		pQbuffer = acb->rqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		pQbuffer = acb->wqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_3F;
		break;
	}
	case ARCMSR_MESSAGE_SAY_HELLO: {
		/* liveness probe: echo an identification string */
		int8_t *hello_string = "Hello! I am ARCMSR";
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		memcpy(pcmdmessagefld->messagedatabuffer,
			hello_string, (int16_t)strlen(hello_string));
		break;
	}
	case ARCMSR_MESSAGE_SAY_GOODBYE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_iop_parking(acb);
		break;
	}
	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_flush_adapter_cache(acb);
		break;
	}
	default:
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: unknown controlcode!\n", __func__);
	}
	message_out:
	if (use_sg) {
		struct scatterlist *sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset);
	}
	return retvalue;
}
3246
arcmsr_get_freeccb(struct AdapterControlBlock * acb)3247 static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
3248 {
3249 struct list_head *head;
3250 struct CommandControlBlock *ccb = NULL;
3251 unsigned long flags;
3252
3253 spin_lock_irqsave(&acb->ccblist_lock, flags);
3254 head = &acb->ccb_free_list;
3255 if (!list_empty(head)) {
3256 ccb = list_entry(head->next, struct CommandControlBlock, list);
3257 list_del_init(&ccb->list);
3258 }else{
3259 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3260 return NULL;
3261 }
3262 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
3263 return ccb;
3264 }
3265
arcmsr_handle_virtual_command(struct AdapterControlBlock * acb,struct scsi_cmnd * cmd)3266 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
3267 struct scsi_cmnd *cmd)
3268 {
3269 switch (cmd->cmnd[0]) {
3270 case INQUIRY: {
3271 unsigned char inqdata[36];
3272 char *buffer;
3273 struct scatterlist *sg;
3274
3275 if (cmd->device->lun) {
3276 cmd->result = (DID_TIME_OUT << 16);
3277 scsi_done(cmd);
3278 return;
3279 }
3280 inqdata[0] = TYPE_PROCESSOR;
3281 /* Periph Qualifier & Periph Dev Type */
3282 inqdata[1] = 0;
3283 /* rem media bit & Dev Type Modifier */
3284 inqdata[2] = 0;
3285 /* ISO, ECMA, & ANSI versions */
3286 inqdata[4] = 31;
3287 /* length of additional data */
3288 memcpy(&inqdata[8], "Areca ", 8);
3289 /* Vendor Identification */
3290 memcpy(&inqdata[16], "RAID controller ", 16);
3291 /* Product Identification */
3292 memcpy(&inqdata[32], "R001", 4); /* Product Revision */
3293
3294 sg = scsi_sglist(cmd);
3295 buffer = kmap_atomic(sg_page(sg)) + sg->offset;
3296
3297 memcpy(buffer, inqdata, sizeof(inqdata));
3298 sg = scsi_sglist(cmd);
3299 kunmap_atomic(buffer - sg->offset);
3300
3301 scsi_done(cmd);
3302 }
3303 break;
3304 case WRITE_BUFFER:
3305 case READ_BUFFER: {
3306 if (arcmsr_iop_message_xfer(acb, cmd))
3307 cmd->result = (DID_ERROR << 16);
3308 scsi_done(cmd);
3309 }
3310 break;
3311 default:
3312 scsi_done(cmd);
3313 }
3314 }
3315
/*
** SCSI midlayer queuecommand entry (locked variant, wrapped by
** DEF_SCSI_QCMD below).  Target id 16 is the driver's virtual device
** for IOP message transfer; real commands are packaged into a free CCB
** and posted to the adapter.  Returns SCSI_MLQUEUE_HOST_BUSY when the
** CCB pool is exhausted so the midlayer retries later.
*/
static enum scsi_qc_status arcmsr_queue_command_lck(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct CommandControlBlock *ccb;
	int target = cmd->device->id;

	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) {
		/* adapter is gone: fail the command immediately */
		cmd->result = (DID_NO_CONNECT << 16);
		scsi_done(cmd);
		return 0;
	}
	cmd->host_scribble = NULL;
	cmd->result = 0;
	if (target == 16) {
		/* virtual device for iop message transfer */
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	ccb = arcmsr_get_freeccb(acb);
	if (!ccb)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
		cmd->result = (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT;
		scsi_done(cmd);
		return 0;
	}
	arcmsr_post_ccb(acb, ccb);
	return 0;
}
3346
/* Generate arcmsr_queue_command() as the host-lock-managing wrapper
 * around arcmsr_queue_command_lck() above. */
static DEF_SCSI_QCMD(arcmsr_queue_command)

3349 static int arcmsr_sdev_configure(struct scsi_device *sdev,
3350 struct queue_limits *lim)
3351 {
3352 unsigned int dev_timeout;
3353
3354 dev_timeout = sdev->request_queue->rq_timeout;
3355 if ((cmd_timeout > 0) && ((cmd_timeout * HZ) > dev_timeout))
3356 blk_queue_rq_timeout(sdev->request_queue, cmd_timeout * HZ);
3357 return 0;
3358 }
3359
/*
** Copy the adapter's firmware configuration out of the message r/w
** buffer (MMIO): model (rwbuffer[15..16]), version (rwbuffer[17..20]),
** device map (rwbuffer[21..24]) plus assorted scalar fields, then log
** the model and firmware version.
*/
static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
{
	int i;
	uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
	uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
	uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
	uint32_t *firm_model = &rwbuffer[15];
	uint32_t *firm_version = &rwbuffer[17];
	uint32_t *device_map = &rwbuffer[21];

	/* 2 words of model string */
	for (i = 0; i < 2; i++)
		*acb_firm_model++ = readl(firm_model++);
	/* 4 words of firmware version string */
	for (i = 0; i < 4; i++)
		*acb_firm_version++ = readl(firm_version++);
	/* 4 words of device map */
	for (i = 0; i < 4; i++)
		*acb_device_map++ = readl(device_map++);
	pACB->signature = readl(&rwbuffer[0]);
	pACB->firm_request_len = readl(&rwbuffer[1]);
	pACB->firm_numbers_queue = readl(&rwbuffer[2]);
	pACB->firm_sdram_size = readl(&rwbuffer[3]);
	pACB->firm_hd_channels = readl(&rwbuffer[4]);
	pACB->firm_cfg_version = readl(&rwbuffer[25]);
	/* PIC status word only exists on Type F units */
	if (pACB->adapter_type == ACB_ADAPTER_TYPE_F)
		pACB->firm_PicStatus = readl(&rwbuffer[30]);
	else
		pACB->firm_PicStatus = 0;
	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
		pACB->host->host_no,
		pACB->firm_model,
		pACB->firm_version);
}
3406
arcmsr_hbaA_get_config(struct AdapterControlBlock * acb)3407 static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
3408 {
3409 struct MessageUnit_A __iomem *reg = acb->pmuA;
3410
3411 arcmsr_wait_firmware_ready(acb);
3412 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3413 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
3414 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
3415 miscellaneous data' timeout \n", acb->host->host_no);
3416 return false;
3417 }
3418 arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
3419 return true;
3420 }
/*
** Fetch the Type B adapter's firmware configuration: wait for firmware
** ready, switch the IOP into driver mode, post the get-config message,
** wait for completion, then parse the message r/w buffer.  Returns
** false on either doorbell timeout.
*/
static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	arcmsr_wait_firmware_ready(acb);
	writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", acb->host->host_no);
		return false;
	}
	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
	return true;
}
3440
arcmsr_hbaC_get_config(struct AdapterControlBlock * pACB)3441 static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
3442 {
3443 uint32_t intmask_org;
3444 struct MessageUnit_C __iomem *reg = pACB->pmuC;
3445
3446 /* disable all outbound interrupt */
3447 intmask_org = readl(®->host_int_mask); /* disable outbound message0 int */
3448 writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, ®->host_int_mask);
3449 /* wait firmware ready */
3450 arcmsr_wait_firmware_ready(pACB);
3451 /* post "get config" instruction */
3452 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0);
3453 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, ®->inbound_doorbell);
3454 /* wait message ready */
3455 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
3456 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
3457 miscellaneous data' timeout \n", pACB->host->host_no);
3458 return false;
3459 }
3460 arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
3461 return true;
3462 }
3463
/*
** Fetch the Type D (ARC1214) adapter's firmware configuration: clear
** any stale message-done doorbell, wait for firmware ready, post the
** get-config message, wait for completion, then parse
** msgcode_rwbuffer.  Returns false on message timeout.
*/
static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg = acb->pmuD;

	if (readl(acb->pmuD->outbound_doorbell) &
		ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
		writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
			acb->pmuD->outbound_doorbell);/*clear interrupt*/
	}
	arcmsr_wait_firmware_ready(acb);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
	/* wait message ready */
	if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
		pr_notice("arcmsr%d: wait get adapter firmware "
			"miscellaneous data timeout\n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
	return true;
}
3485
/*
 * arcmsr_hbaE_get_config - fetch firmware configuration from a type-E IOP.
 *
 * Masks outbound interrupts, waits for firmware-ready (plus a 20 ms
 * settle delay), posts "get config", and rings the inbound doorbell by
 * toggling the driver-side ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE bit in
 * acb->out_doorbell (type-E doorbells are edge/toggle style).
 *
 * Returns true on success, false if the handshake times out.
 */
static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *reg = pACB->pmuE;
	uint32_t intmask_org;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	mdelay(20);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);

	/* Toggle the message-done bit to ring the inbound doorbell. */
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait get adapter firmware "
			"miscellaneous data timeout\n", pACB->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
	return true;
}
3511
/*
 * arcmsr_hbaF_get_config - fetch firmware configuration from a type-F IOP.
 *
 * Type F reuses the type-E doorbell constants and wait helper
 * (arcmsr_hbaE_wait_msgint_ready), but the reply is read from the
 * host-memory buffer pACB->msgcode_rwbuffer rather than from MMIO
 * registers.
 *
 * Returns true on success, false if the handshake times out.
 */
static bool arcmsr_hbaF_get_config(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_F __iomem *reg = pACB->pmuF;
	uint32_t intmask_org;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);

	/* Toggle the message-done bit to ring the inbound doorbell. */
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait get adapter firmware miscellaneous data timeout\n",
			pACB->host->host_no);
		return false;
	}
	/* Reply lands in host memory for type F, not in the register file. */
	arcmsr_get_adapter_config(pACB, pACB->msgcode_rwbuffer);
	return true;
}
3536
arcmsr_get_firmware_spec(struct AdapterControlBlock * acb)3537 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3538 {
3539 bool rtn = false;
3540
3541 switch (acb->adapter_type) {
3542 case ACB_ADAPTER_TYPE_A:
3543 rtn = arcmsr_hbaA_get_config(acb);
3544 break;
3545 case ACB_ADAPTER_TYPE_B:
3546 rtn = arcmsr_hbaB_get_config(acb);
3547 break;
3548 case ACB_ADAPTER_TYPE_C:
3549 rtn = arcmsr_hbaC_get_config(acb);
3550 break;
3551 case ACB_ADAPTER_TYPE_D:
3552 rtn = arcmsr_hbaD_get_config(acb);
3553 break;
3554 case ACB_ADAPTER_TYPE_E:
3555 rtn = arcmsr_hbaE_get_config(acb);
3556 break;
3557 case ACB_ADAPTER_TYPE_F:
3558 rtn = arcmsr_hbaF_get_config(acb);
3559 break;
3560 default:
3561 break;
3562 }
3563 acb->maxOutstanding = acb->firm_numbers_queue - 1;
3564 if (acb->host->can_queue >= acb->firm_numbers_queue)
3565 acb->host->can_queue = acb->maxOutstanding;
3566 else
3567 acb->maxOutstanding = acb->host->can_queue;
3568 acb->maxFreeCCB = acb->host->can_queue;
3569 if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
3570 acb->maxFreeCCB += 64;
3571 return rtn;
3572 }
3573
/*
 * arcmsr_hbaA_polling_ccbdone - synchronously drain the type-A outbound
 * queue until @poll_ccb is reported complete by the IOP.
 *
 * Error-recovery path: interrupts may not be serviceable, so the queue
 * port is polled directly and every CCB found on the way is completed.
 *
 * Returns SUCCESS once @poll_ccb has been seen, or FAILED after more
 * than 100 polls with 25 ms sleeps between empty reads.
 */
static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct CommandControlBlock *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
	int rtn;
	bool error;
	unsigned long ccb_cdb_phy;

polling_hba_ccb_retry:
	poll_count++;
	outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
	while (1) {
		/* 0xFFFFFFFF from the queue port means "queue empty". */
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hba_ccb_retry;
			}
		}
		/* flag_ccb carries CDB phys addr >> 5; rebuild the low 32 bits. */
		ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		/* Translate the bus address back to the driver's CCB. */
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			/* Aborted (or polled) CCB: complete it as DID_ABORT. */
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, (u32)ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			/* Unknown/foreign CCB: log it and keep draining. */
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		/* Normal completion: error bit travels in flag_ccb (mode 0). */
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}
3634
/*
 * arcmsr_hbaB_polling_ccbdone - synchronously drain the type-B done
 * queue (host-memory ring done_qbuffer[]) until @poll_ccb completes.
 *
 * Same contract as the type-A variant: SUCCESS once @poll_ccb is seen,
 * FAILED after more than 100 polls with 25 ms sleeps.
 */
static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int index, rtn;
	bool error;
	unsigned long ccb_cdb_phy;

polling_hbb_ccb_retry:
	poll_count++;
	/* clear doorbell interrupt */
	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	while(1){
		index = reg->doneq_index;
		flag_ccb = reg->done_qbuffer[index];
		/* A zero entry means nothing new at the current index. */
		if (flag_ccb == 0) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hbb_ccb_retry;
			}
		}
		/* Consume the entry and advance the ring index. */
		reg->done_qbuffer[index] = 0;
		index++;
		/*if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
		/* check if command done with no error*/
		/* flag_ccb carries CDB phys addr >> 5; rebuild the low 32 bits. */
		ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			/* Aborted (or polled) CCB: complete it as DID_ABORT. */
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					,acb->host->host_no
					,ccb->pcmd->device->id
					,(u32)ccb->pcmd->device->lun
					,ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			/* Unknown/foreign CCB: log it and keep draining. */
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		/* Normal completion: error bit travels in flag_ccb (mode 0). */
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}
3703
/*
 * arcmsr_hbaC_polling_ccbdone - synchronously drain the type-C outbound
 * post queue until @poll_ccb completes.
 *
 * Presence of work is detected via the POSTQUEUE bit in host_int_status;
 * each entry is then read from outbound_queueport_low. SUCCESS once
 * @poll_ccb is seen, FAILED after more than 100 polls of 25 ms.
 */
static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_C __iomem *reg = acb->pmuC;
	uint32_t flag_ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	bool error;
	struct CommandControlBlock *pCCB;
	uint32_t poll_ccb_done = 0, poll_count = 0;
	int rtn;
	unsigned long ccb_cdb_phy;

polling_hbc_ccb_retry:
	poll_count++;
	while (1) {
		/* No POSTQUEUE interrupt pending -> queue is empty right now. */
		if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 100) {
					rtn = FAILED;
					break;
				}
				goto polling_hbc_ccb_retry;
			}
		}
		flag_ccb = readl(&reg->outbound_queueport_low);
		/* Low nibble holds status bits; the rest is the CDB phys addr. */
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		/* check ifcommand done with no error*/
		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
			/* Aborted CCB: complete it as DID_ABORT. */
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			/* Unknown/foreign CCB: log it and keep draining. */
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		/* Normal completion: error bit travels in flag_ccb (mode 1). */
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}
3765
/*
 * arcmsr_hbaD_polling_ccbdone - synchronously drain the type-D
 * (ARC-1214) done queue until @poll_ccb completes.
 *
 * The done queue lives in host memory; entry [0].addressLow appears to
 * carry the IOP's producer index (NOTE(review): confirm against the
 * ARC-1214 spec). doneq_index keeps a toggle bit at 0x4000 and a 12-bit
 * position. Index updates are serialized with acb->doneq_lock against
 * the interrupt path. SUCCESS once @poll_ccb is seen, FAILED after more
 * than 40 polls of 25 ms.
 */
static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	bool error;
	uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb;
	int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
	unsigned long flags, ccb_cdb_phy;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *pCCB;
	struct MessageUnit_D *pmu = acb->pmuD;

polling_hbaD_ccb_retry:
	poll_count++;
	while (1) {
		spin_lock_irqsave(&acb->doneq_lock, flags);
		outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
		doneq_index = pmu->doneq_index;
		/* Producer == consumer (low 12 bits) -> queue is empty. */
		if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
			spin_unlock_irqrestore(&acb->doneq_lock, flags);
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 40) {
					rtn = FAILED;
					break;
				}
				goto polling_hbaD_ccb_retry;
			}
		}
		/* Advance consumer index, preserving/flipping the 0x4000 toggle. */
		toggle = doneq_index & 0x4000;
		index_stripped = (doneq_index & 0xFFF) + 1;
		index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
		pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
			((toggle ^ 0x4000) + 1);
		doneq_index = pmu->doneq_index;
		spin_unlock_irqrestore(&acb->doneq_lock, flags);
		flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
		/* Low nibble holds status bits; the rest is the CDB phys addr. */
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
			ccb_cdb_phy);
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
			arcmsr_cdb);
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		if ((pCCB->acb != acb) ||
			(pCCB->startdone != ARCMSR_CCB_START)) {
			/* Aborted CCB: complete it as DID_ABORT. */
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				pr_notice("arcmsr%d: scsi id = %d "
					"lun = %d ccb = '0x%p' poll command "
					"abort successfully\n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			/* Unknown/foreign CCB: log it and keep draining. */
			pr_notice("arcmsr%d: polling an illegal "
				"ccb command done ccb = '0x%p' "
				"ccboutstandingcount = %d\n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		/* Normal completion: error bit travels in flag_ccb (mode 1). */
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
			? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}
3841
arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock * acb,struct CommandControlBlock * poll_ccb)3842 static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
3843 struct CommandControlBlock *poll_ccb)
3844 {
3845 bool error;
3846 uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index;
3847 uint16_t cmdSMID;
3848 unsigned long flags;
3849 int rtn;
3850 struct CommandControlBlock *pCCB;
3851 struct MessageUnit_E __iomem *reg = acb->pmuE;
3852
3853 polling_hbaC_ccb_retry:
3854 poll_count++;
3855 while (1) {
3856 spin_lock_irqsave(&acb->doneq_lock, flags);
3857 doneq_index = acb->doneq_index;
3858 if ((readl(®->reply_post_producer_index) & 0xFFFF) ==
3859 doneq_index) {
3860 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3861 if (poll_ccb_done) {
3862 rtn = SUCCESS;
3863 break;
3864 } else {
3865 msleep(25);
3866 if (poll_count > 40) {
3867 rtn = FAILED;
3868 break;
3869 }
3870 goto polling_hbaC_ccb_retry;
3871 }
3872 }
3873 cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
3874 doneq_index++;
3875 if (doneq_index >= acb->completionQ_entry)
3876 doneq_index = 0;
3877 acb->doneq_index = doneq_index;
3878 spin_unlock_irqrestore(&acb->doneq_lock, flags);
3879 pCCB = acb->pccb_pool[cmdSMID];
3880 poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
3881 /* check if command done with no error*/
3882 if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
3883 if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
3884 pr_notice("arcmsr%d: scsi id = %d "
3885 "lun = %d ccb = '0x%p' poll command "
3886 "abort successfully\n"
3887 , acb->host->host_no
3888 , pCCB->pcmd->device->id
3889 , (u32)pCCB->pcmd->device->lun
3890 , pCCB);
3891 pCCB->pcmd->result = DID_ABORT << 16;
3892 arcmsr_ccb_complete(pCCB);
3893 continue;
3894 }
3895 pr_notice("arcmsr%d: polling an illegal "
3896 "ccb command done ccb = '0x%p' "
3897 "ccboutstandingcount = %d\n"
3898 , acb->host->host_no
3899 , pCCB
3900 , atomic_read(&acb->ccboutstandingcount));
3901 continue;
3902 }
3903 error = (acb->pCompletionQ[doneq_index].cmdFlag &
3904 ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
3905 arcmsr_report_ccb_state(acb, pCCB, error);
3906 }
3907 writel(doneq_index, ®->reply_post_consumer_index);
3908 return rtn;
3909 }
3910
arcmsr_polling_ccbdone(struct AdapterControlBlock * acb,struct CommandControlBlock * poll_ccb)3911 static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
3912 struct CommandControlBlock *poll_ccb)
3913 {
3914 int rtn = 0;
3915 switch (acb->adapter_type) {
3916
3917 case ACB_ADAPTER_TYPE_A:
3918 rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
3919 break;
3920 case ACB_ADAPTER_TYPE_B:
3921 rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
3922 break;
3923 case ACB_ADAPTER_TYPE_C:
3924 rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
3925 break;
3926 case ACB_ADAPTER_TYPE_D:
3927 rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
3928 break;
3929 case ACB_ADAPTER_TYPE_E:
3930 case ACB_ADAPTER_TYPE_F:
3931 rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
3932 break;
3933 }
3934 return rtn;
3935 }
3936
/*
 * arcmsr_set_iop_datetime - timer callback that pushes the host's local
 * date/time down to the IOP via the SYNC_TIMER message.
 *
 * The wall-clock fields are packed into two 32-bit words through the
 * union, written into the chip-specific rwbuffer, and the SYNC_TIMER
 * message is posted. Re-arms itself: hourly when a timezone offset is
 * configured, otherwise every ARCMSR_MINUTES.
 */
static void arcmsr_set_iop_datetime(struct timer_list *t)
{
	struct AdapterControlBlock *pacb = timer_container_of(pacb, t,
		refresh_timer);
	unsigned int next_time;
	struct tm tm;

	/* Overlay: 8 packed byte fields viewed as two 32-bit words. */
	union {
		struct {
			uint16_t signature;
			uint8_t year;
			uint8_t month;
			uint8_t date;
			uint8_t hour;
			uint8_t minute;
			uint8_t second;
		} a;
		struct {
			uint32_t msg_time[2];
		} b;
	} datetime;

	/* Local time: apply the timezone offset to the realtime clock. */
	time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm);

	datetime.a.signature = 0x55AA;
	datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */
	/* NOTE(review): tm_mon is 0-11; presumably firmware expects that. */
	datetime.a.month = tm.tm_mon;
	datetime.a.date = tm.tm_mday;
	datetime.a.hour = tm.tm_hour;
	datetime.a.minute = tm.tm_min;
	datetime.a.second = tm.tm_sec;

	/* Deliver the two packed words via the chip-specific mailbox. */
	switch (pacb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = pacb->pmuA;
		writel(datetime.b.msg_time[0], &reg->message_rwbuffer[0]);
		writel(datetime.b.msg_time[1], &reg->message_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_B *reg = pacb->pmuB;
		rwbuffer = reg->message_rwbuffer;
		writel(datetime.b.msg_time[0], rwbuffer++);
		writel(datetime.b.msg_time[1], rwbuffer++);
		writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell);
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = pacb->pmuC;
		writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
		writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
		writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_D *reg = pacb->pmuD;
		rwbuffer = reg->msgcode_rwbuffer;
		writel(datetime.b.msg_time[0], rwbuffer++);
		writel(datetime.b.msg_time[1], rwbuffer++);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0);
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = pacb->pmuE;
		writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
		writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
		/* Type-E doorbell is toggle-style. */
		pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(pacb->out_doorbell, &reg->iobound_doorbell);
		break;
	}
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_F __iomem *reg = pacb->pmuF;

		/* Type F exchanges the payload through host memory. */
		pacb->msgcode_rwbuffer[0] = datetime.b.msg_time[0];
		pacb->msgcode_rwbuffer[1] = datetime.b.msg_time[1];
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
		pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(pacb->out_doorbell, &reg->iobound_doorbell);
		break;
	}
	}
	/* Re-arm: hourly with a configured timezone, else every minute tick. */
	if (sys_tz.tz_minuteswest)
		next_time = ARCMSR_HOURS;
	else
		next_time = ARCMSR_MINUTES;
	mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time));
}
4029
/*
 * arcmsr_iop_confirm - tell the IOP where the driver's command/queue
 * structures live in bus-address space.
 *
 * Picks the relevant DMA handle per adapter type, records the high
 * 32 bits in the ACB, then runs the chip-specific SET_CONFIG handshake.
 *
 * Returns 0 on success, 1 if any message handshake times out.
 */
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
	uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
	dma_addr_t dma_coherent_handle;

	/*
	********************************************************************
	** here we need to tell iop 331 our freeccb.HighPart
	** if freeccb.HighPart is not zero
	********************************************************************
	*/
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
	case ACB_ADAPTER_TYPE_D:
		/* B/D post through the second coherent area. */
		dma_coherent_handle = acb->dma_coherent_handle2;
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		/* E/F report the address of the first embedded CDB. */
		dma_coherent_handle = acb->dma_coherent_handle +
			offsetof(struct CommandControlBlock, arcmsr_cdb);
		break;
	default:
		dma_coherent_handle = acb->dma_coherent_handle;
		break;
	}
	cdb_phyaddr = lower_32_bits(dma_coherent_handle);
	cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
	/* Cached so completion paths can rebuild full 64-bit CDB addresses. */
	acb->cdb_phyadd_hipart = ((uint64_t)cdb_phyaddr_hi32) << 32;
	/*
	***********************************************************************
	** if adapter type B, set window of "post command Q"
	***********************************************************************
	*/
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		/* Type A only needs the high half, and only when non-zero. */
		if (cdb_phyaddr_hi32 != 0) {
			struct MessageUnit_A __iomem *reg = acb->pmuA;
			writel(ARCMSR_SIGNATURE_SET_CONFIG, \
						&reg->message_rwbuffer[0]);
			writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
							&reg->inbound_msgaddr0);
			if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
				part physical address timeout\n",
				acb->host->host_no);
				return 1;
			}
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		uint32_t __iomem *rwbuffer;

		struct MessageUnit_B *reg = acb->pmuB;
		reg->postq_index = 0;
		reg->doneq_index = 0;
		writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n", \
				acb->host->host_no);
			return 1;
		}
		rwbuffer = reg->message_rwbuffer;
		/* driver "set config" signature */
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		/* normal should be zero */
		writel(cdb_phyaddr_hi32, rwbuffer++);
		/* postQ size (256 + 8)*4	 */
		writel(cdb_phyaddr, rwbuffer++);
		/* doneQ size (256 + 8)*4	 */
		writel(cdb_phyaddr + 1056, rwbuffer++);
		/* ccb maxQ size must be --> [(256 + 8)*4]*/
		writel(1056, rwbuffer);

		writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
			timeout \n",acb->host->host_no);
			return 1;
		}
		writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			pr_err("arcmsr%d: can't set driver mode.\n",
				acb->host->host_no);
			return 1;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
				acb->adapter_index, cdb_phyaddr_hi32);
		writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
		writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
		writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
		if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
			timeout \n", acb->host->host_no);
			return 1;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_D *reg = acb->pmuD;
		reg->postq_index = 0;
		reg->doneq_index = 0;
		rwbuffer = reg->msgcode_rwbuffer;
		/* Layout: signature, hi32, postQ base, doneQ base, depth. */
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		writel(cdb_phyaddr_hi32, rwbuffer++);
		writel(cdb_phyaddr, rwbuffer++);
		writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE *
			sizeof(struct InBound_SRB)), rwbuffer++);
		writel(0x100, rwbuffer);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0);
		if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
			pr_notice("arcmsr%d: 'set command Q window' timeout\n",
				acb->host->host_no);
			return 1;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		/* Full layout: signature, chip id, CDB addr, sizes, queue. */
		writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
		writel(ARCMSR_SIGNATURE_1884, &reg->msgcode_rwbuffer[1]);
		writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]);
		writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[3]);
		writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]);
		writel(lower_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[5]);
		writel(upper_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[6]);
		writel(acb->ioqueue_size, &reg->msgcode_rwbuffer[7]);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
		/* Toggle-style doorbell. */
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
			pr_notice("arcmsr%d: 'set command Q window' timeout \n",
				acb->host->host_no);
			return 1;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_F __iomem *reg = acb->pmuF;

		/* Type F stages the payload in host memory. */
		acb->msgcode_rwbuffer[0] = ARCMSR_SIGNATURE_SET_CONFIG;
		acb->msgcode_rwbuffer[1] = ARCMSR_SIGNATURE_1886;
		acb->msgcode_rwbuffer[2] = cdb_phyaddr;
		acb->msgcode_rwbuffer[3] = cdb_phyaddr_hi32;
		acb->msgcode_rwbuffer[4] = acb->ccbsize;
		acb->msgcode_rwbuffer[5] = lower_32_bits(acb->dma_coherent_handle2);
		acb->msgcode_rwbuffer[6] = upper_32_bits(acb->dma_coherent_handle2);
		acb->msgcode_rwbuffer[7] = acb->completeQ_size;
		if (acb->xor_mega) {
			acb->msgcode_rwbuffer[8] = 0x455AA;	//Linux init 2
			acb->msgcode_rwbuffer[9] = 0;
			acb->msgcode_rwbuffer[10] = lower_32_bits(acb->xorPhys);
			acb->msgcode_rwbuffer[11] = upper_32_bits(acb->xorPhys);
		}
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
			pr_notice("arcmsr%d: 'set command Q window' timeout\n",
				acb->host->host_no);
			return 1;
		}
		}
		break;
	}
	return 0;
}
4208
/*
 * arcmsr_wait_firmware_ready - block until the IOP reports its
 * FIRMWARE_OK bit in the chip-specific status register/doorbell.
 *
 * Sleeps 20 ms between reads only while ACB_F_IOP_INITED is clear; once
 * that flag is set the loop busy-reads the register. No timeout: this
 * spins until the firmware comes up.
 */
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
{
	uint32_t firmware_state = 0;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(reg->iop2drv_doorbell);
		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
		/* Type B must ack with END_OF_INTERRUPT once ready. */
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(reg->outbound_msgaddr1);
		} while ((firmware_state &
			ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
		}
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		/* Types E and F share the MessageUnit_E status layout here. */
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0);
		}
		break;
	}
}
4265
/*
 * arcmsr_request_device_map - periodic timer callback that asks the IOP
 * for an updated device map via the GET_CONFIG message.
 *
 * Skips the request (but still re-arms the 6 s timer) while a previous
 * GET_CONFIG is outstanding or a bus reset/abort is in progress. On a
 * successful post, sets ACB_F_MSG_GET_CONFIG so the completion path
 * knows a reply is expected.
 */
static void arcmsr_request_device_map(struct timer_list *t)
{
	struct AdapterControlBlock *acb = timer_container_of(acb, t,
			eternal_timer);
	if (acb->acb_flags & (ACB_F_MSG_GET_CONFIG | ACB_F_BUS_RESET | ACB_F_ABORT)) {
		/* Busy: try again on the next 6 s tick. */
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	} else {
		acb->fw_flag = FW_NORMAL;
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A: {
			struct MessageUnit_A __iomem *reg = acb->pmuA;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			break;
			}
		case ACB_ADAPTER_TYPE_B: {
			struct MessageUnit_B *reg = acb->pmuB;
			writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
			break;
			}
		case ACB_ADAPTER_TYPE_C: {
			struct MessageUnit_C __iomem *reg = acb->pmuC;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
			break;
			}
		case ACB_ADAPTER_TYPE_D: {
			struct MessageUnit_D *reg = acb->pmuD;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
			break;
			}
		case ACB_ADAPTER_TYPE_E: {
			struct MessageUnit_E __iomem *reg = acb->pmuE;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
			writel(acb->out_doorbell, &reg->iobound_doorbell);
			break;
			}
		case ACB_ADAPTER_TYPE_F: {
			struct MessageUnit_F __iomem *reg = acb->pmuF;
			uint32_t outMsg1 = readl(&reg->outbound_msgaddr1);

			/*
			 * Skip the request (and the GET_CONFIG flag) if the
			 * firmware is not up or reports no volume change.
			 */
			if (!(outMsg1 & ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK) ||
				(outMsg1 & ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE))
				goto nxt6s;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
			writel(acb->out_doorbell, &reg->iobound_doorbell);
			break;
			}
		default:
			return;
		}
		/* Mark a GET_CONFIG reply as outstanding. */
		acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
nxt6s:
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	}
}
4323
arcmsr_hbaA_start_bgrb(struct AdapterControlBlock * acb)4324 static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
4325 {
4326 struct MessageUnit_A __iomem *reg = acb->pmuA;
4327 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4328 writel(ARCMSR_INBOUND_MESG0_START_BGRB, ®->inbound_msgaddr0);
4329 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
4330 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
4331 rebuild' timeout \n", acb->host->host_no);
4332 }
4333 }
4334
arcmsr_hbaB_start_bgrb(struct AdapterControlBlock * acb)4335 static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
4336 {
4337 struct MessageUnit_B *reg = acb->pmuB;
4338 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4339 writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
4340 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4341 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
4342 rebuild' timeout \n",acb->host->host_no);
4343 }
4344 }
4345
arcmsr_hbaC_start_bgrb(struct AdapterControlBlock * pACB)4346 static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
4347 {
4348 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
4349 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
4350 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
4351 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
4352 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
4353 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
4354 rebuild' timeout \n", pACB->host->host_no);
4355 }
4356 return;
4357 }
4358
arcmsr_hbaD_start_bgrb(struct AdapterControlBlock * pACB)4359 static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
4360 {
4361 struct MessageUnit_D *pmu = pACB->pmuD;
4362
4363 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
4364 writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
4365 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
4366 pr_notice("arcmsr%d: wait 'start adapter "
4367 "background rebuild' timeout\n", pACB->host->host_no);
4368 }
4369 }
4370
arcmsr_hbaE_start_bgrb(struct AdapterControlBlock * pACB)4371 static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
4372 {
4373 struct MessageUnit_E __iomem *pmu = pACB->pmuE;
4374
4375 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
4376 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0);
4377 pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
4378 writel(pACB->out_doorbell, &pmu->iobound_doorbell);
4379 if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
4380 pr_notice("arcmsr%d: wait 'start adapter "
4381 "background rebuild' timeout \n", pACB->host->host_no);
4382 }
4383 }
4384
arcmsr_start_adapter_bgrb(struct AdapterControlBlock * acb)4385 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
4386 {
4387 switch (acb->adapter_type) {
4388 case ACB_ADAPTER_TYPE_A:
4389 arcmsr_hbaA_start_bgrb(acb);
4390 break;
4391 case ACB_ADAPTER_TYPE_B:
4392 arcmsr_hbaB_start_bgrb(acb);
4393 break;
4394 case ACB_ADAPTER_TYPE_C:
4395 arcmsr_hbaC_start_bgrb(acb);
4396 break;
4397 case ACB_ADAPTER_TYPE_D:
4398 arcmsr_hbaD_start_bgrb(acb);
4399 break;
4400 case ACB_ADAPTER_TYPE_E:
4401 case ACB_ADAPTER_TYPE_F:
4402 arcmsr_hbaE_start_bgrb(acb);
4403 break;
4404 }
4405 }
4406
arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock * acb)4407 static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
4408 {
4409 switch (acb->adapter_type) {
4410 case ACB_ADAPTER_TYPE_A: {
4411 struct MessageUnit_A __iomem *reg = acb->pmuA;
4412 uint32_t outbound_doorbell;
4413 /* empty doorbell Qbuffer if door bell ringed */
4414 outbound_doorbell = readl(®->outbound_doorbell);
4415 /*clear doorbell interrupt */
4416 writel(outbound_doorbell, ®->outbound_doorbell);
4417 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell);
4418 }
4419 break;
4420
4421 case ACB_ADAPTER_TYPE_B: {
4422 struct MessageUnit_B *reg = acb->pmuB;
4423 uint32_t outbound_doorbell, i;
4424 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
4425 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
4426 /* let IOP know data has been read */
4427 for(i=0; i < 200; i++) {
4428 msleep(20);
4429 outbound_doorbell = readl(reg->iop2drv_doorbell);
4430 if( outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
4431 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
4432 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
4433 } else
4434 break;
4435 }
4436 }
4437 break;
4438 case ACB_ADAPTER_TYPE_C: {
4439 struct MessageUnit_C __iomem *reg = acb->pmuC;
4440 uint32_t outbound_doorbell, i;
4441 /* empty doorbell Qbuffer if door bell ringed */
4442 outbound_doorbell = readl(®->outbound_doorbell);
4443 writel(outbound_doorbell, ®->outbound_doorbell_clear);
4444 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell);
4445 for (i = 0; i < 200; i++) {
4446 msleep(20);
4447 outbound_doorbell = readl(®->outbound_doorbell);
4448 if (outbound_doorbell &
4449 ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
4450 writel(outbound_doorbell,
4451 ®->outbound_doorbell_clear);
4452 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
4453 ®->inbound_doorbell);
4454 } else
4455 break;
4456 }
4457 }
4458 break;
4459 case ACB_ADAPTER_TYPE_D: {
4460 struct MessageUnit_D *reg = acb->pmuD;
4461 uint32_t outbound_doorbell, i;
4462 /* empty doorbell Qbuffer if door bell ringed */
4463 outbound_doorbell = readl(reg->outbound_doorbell);
4464 writel(outbound_doorbell, reg->outbound_doorbell);
4465 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
4466 reg->inbound_doorbell);
4467 for (i = 0; i < 200; i++) {
4468 msleep(20);
4469 outbound_doorbell = readl(reg->outbound_doorbell);
4470 if (outbound_doorbell &
4471 ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
4472 writel(outbound_doorbell,
4473 reg->outbound_doorbell);
4474 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
4475 reg->inbound_doorbell);
4476 } else
4477 break;
4478 }
4479 }
4480 break;
4481 case ACB_ADAPTER_TYPE_E:
4482 case ACB_ADAPTER_TYPE_F: {
4483 struct MessageUnit_E __iomem *reg = acb->pmuE;
4484 uint32_t i, tmp;
4485
4486 acb->in_doorbell = readl(®->iobound_doorbell);
4487 writel(0, ®->host_int_status); /*clear interrupt*/
4488 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
4489 writel(acb->out_doorbell, ®->iobound_doorbell);
4490 for(i=0; i < 200; i++) {
4491 msleep(20);
4492 tmp = acb->in_doorbell;
4493 acb->in_doorbell = readl(®->iobound_doorbell);
4494 if((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
4495 writel(0, ®->host_int_status); /*clear interrupt*/
4496 acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
4497 writel(acb->out_doorbell, ®->iobound_doorbell);
4498 } else
4499 break;
4500 }
4501 }
4502 break;
4503 }
4504 }
4505
arcmsr_enable_eoi_mode(struct AdapterControlBlock * acb)4506 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
4507 {
4508 switch (acb->adapter_type) {
4509 case ACB_ADAPTER_TYPE_A:
4510 return;
4511 case ACB_ADAPTER_TYPE_B:
4512 {
4513 struct MessageUnit_B *reg = acb->pmuB;
4514 writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
4515 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
4516 printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
4517 return;
4518 }
4519 }
4520 break;
4521 case ACB_ADAPTER_TYPE_C:
4522 return;
4523 }
4524 return;
4525 }
4526
/*
 * Issue a hard reset to the RAID controller.
 *
 * The first 64 bytes of PCI config space are saved before the reset
 * and restored afterwards, because the reset clobbers them.  The
 * actual reset mechanism depends on the chip generation (dev_id):
 * a register write for 1680/1214, a magic unlock sequence followed by
 * a diagnostic-register write for 1880/1884, and a PCI config write
 * for everything else.
 */
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
{
	uint8_t value[64];
	int i, count = 0;
	struct MessageUnit_A __iomem *pmuA = acb->pmuA;
	struct MessageUnit_C __iomem *pmuC = acb->pmuC;
	struct MessageUnit_D *pmuD = acb->pmuD;

	/* backup pci config data */
	printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
	for (i = 0; i < 64; i++) {
		pci_read_config_byte(acb->pdev, i, &value[i]);
	}
	/* hardware reset signal */
	if (acb->dev_id == 0x1680) {
		writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
	} else if (acb->dev_id == 0x1880) {
		/* unlock the diagnostic register with the key sequence
		 * (retried up to 5 times), then request the reset */
		do {
			count++;
			writel(0xF, &pmuC->write_sequence);
			writel(0x4, &pmuC->write_sequence);
			writel(0xB, &pmuC->write_sequence);
			writel(0x2, &pmuC->write_sequence);
			writel(0x7, &pmuC->write_sequence);
			writel(0xD, &pmuC->write_sequence);
		} while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
	} else if (acb->dev_id == 0x1884) {
		struct MessageUnit_E __iomem *pmuE = acb->pmuE;
		/* same unlock-then-reset dance via the 3xxx registers;
		 * NOTE(review): key sequence omits the leading 0xF used
		 * for the 1880 — presumably per chip spec, confirm */
		do {
			count++;
			writel(0x4, &pmuE->write_sequence_3xxx);
			writel(0xB, &pmuE->write_sequence_3xxx);
			writel(0x2, &pmuE->write_sequence_3xxx);
			writel(0x7, &pmuE->write_sequence_3xxx);
			writel(0xD, &pmuE->write_sequence_3xxx);
			mdelay(10);
		} while (((readl(&pmuE->host_diagnostic_3xxx) &
			ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
	} else if (acb->dev_id == 0x1214) {
		writel(0x20, pmuD->reset_request);
	} else {
		/* legacy parts: reset via PCI config register 0x84 */
		pci_write_config_byte(acb->pdev, 0x84, 0x20);
	}
	/* give the chip time to reset before restoring config space */
	msleep(2000);
	/* write back pci config data */
	for (i = 0; i < 64; i++) {
		pci_write_config_byte(acb->pdev, i, value[i]);
	}
	msleep(1000);
	return;
}
4580
arcmsr_reset_in_progress(struct AdapterControlBlock * acb)4581 static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
4582 {
4583 bool rtn = true;
4584
4585 switch(acb->adapter_type) {
4586 case ACB_ADAPTER_TYPE_A:{
4587 struct MessageUnit_A __iomem *reg = acb->pmuA;
4588 rtn = ((readl(®->outbound_msgaddr1) &
4589 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false;
4590 }
4591 break;
4592 case ACB_ADAPTER_TYPE_B:{
4593 struct MessageUnit_B *reg = acb->pmuB;
4594 rtn = ((readl(reg->iop2drv_doorbell) &
4595 ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false;
4596 }
4597 break;
4598 case ACB_ADAPTER_TYPE_C:{
4599 struct MessageUnit_C __iomem *reg = acb->pmuC;
4600 rtn = (readl(®->host_diagnostic) & 0x04) ? true : false;
4601 }
4602 break;
4603 case ACB_ADAPTER_TYPE_D:{
4604 struct MessageUnit_D *reg = acb->pmuD;
4605 rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ?
4606 true : false;
4607 }
4608 break;
4609 case ACB_ADAPTER_TYPE_E:
4610 case ACB_ADAPTER_TYPE_F:{
4611 struct MessageUnit_E __iomem *reg = acb->pmuE;
4612 rtn = (readl(®->host_diagnostic_3xxx) &
4613 ARCMSR_ARC188X_RESET_ADAPTER) ? true : false;
4614 }
4615 break;
4616 }
4617 return rtn;
4618 }
4619
/*
 * Bring the IOP online: wait for firmware, hand over the CCB queues,
 * start background rebuild, drain stale doorbell data, then restore
 * interrupts and mark the adapter initialized.  The step order
 * follows the adapter's init protocol — do not reorder.
 */
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	uint32_t intmask_org;
	/* disable all outbound interrupt */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	/*start background rebuild*/
	arcmsr_start_adapter_bgrb(acb);
	/* empty doorbell Qbuffer if door bell ringed */
	arcmsr_clear_doorbell_queue_buffer(acb);
	arcmsr_enable_eoi_mode(acb);
	/* enable outbound Post Queue,outbound doorbell Interrupt */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
}
4636
arcmsr_iop_reset(struct AdapterControlBlock * acb)4637 static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
4638 {
4639 struct CommandControlBlock *ccb;
4640 uint32_t intmask_org;
4641 uint8_t rtnval = 0x00;
4642 int i = 0;
4643 unsigned long flags;
4644
4645 if (atomic_read(&acb->ccboutstandingcount) != 0) {
4646 /* disable all outbound interrupt */
4647 intmask_org = arcmsr_disable_outbound_ints(acb);
4648 /* talk to iop 331 outstanding command aborted */
4649 rtnval = arcmsr_abort_allcmd(acb);
4650 /* clear all outbound posted Q */
4651 arcmsr_done4abort_postqueue(acb);
4652 for (i = 0; i < acb->maxFreeCCB; i++) {
4653 ccb = acb->pccb_pool[i];
4654 if (ccb->startdone == ARCMSR_CCB_START) {
4655 scsi_dma_unmap(ccb->pcmd);
4656 ccb->startdone = ARCMSR_CCB_DONE;
4657 ccb->ccb_flags = 0;
4658 spin_lock_irqsave(&acb->ccblist_lock, flags);
4659 list_add_tail(&ccb->list, &acb->ccb_free_list);
4660 spin_unlock_irqrestore(&acb->ccblist_lock, flags);
4661 }
4662 }
4663 atomic_set(&acb->ccboutstandingcount, 0);
4664 /* enable all outbound interrupt */
4665 arcmsr_enable_outbound_ints(acb, intmask_org);
4666 return rtnval;
4667 }
4668 return rtnval;
4669 }
4670
/*
 * SCSI error-handler bus reset callback.
 *
 * If another reset is already in flight, wait (up to 220s) for it to
 * finish and report its outcome.  Otherwise try a soft IOP reset; if
 * that fails, perform a hardware reset and poll until the controller
 * comes back, giving up after ARCMSR_RETRYCOUNT sleeps (FW_DEADLOCK).
 * Returns SUCCESS or FAILED per the SCSI EH contract.
 */
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb;
	int retry_count = 0;
	int rtn = FAILED;
	acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
		return SUCCESS;
	pr_notice("arcmsr: executing bus reset eh.....num_resets = %d,"
		" num_aborts = %d \n", acb->num_resets, acb->num_aborts);
	acb->num_resets++;

	if (acb->acb_flags & ACB_F_BUS_RESET) {
		long timeout;
		/* another EH thread owns the reset; wait for it */
		pr_notice("arcmsr: there is a bus reset eh proceeding...\n");
		timeout = wait_event_timeout(wait_q, (acb->acb_flags
			& ACB_F_BUS_RESET) == 0, 220 * HZ);
		if (timeout)
			return SUCCESS;
	}
	acb->acb_flags |= ACB_F_BUS_RESET;
	if (!arcmsr_iop_reset(acb)) {
		/* soft reset failed (or nothing outstanding): hard reset */
		arcmsr_hardware_reset(acb);
		acb->acb_flags &= ~ACB_F_IOP_INITED;
wait_reset_done:
		ssleep(ARCMSR_SLEEPTIME);
		if (arcmsr_reset_in_progress(acb)) {
			if (retry_count > ARCMSR_RETRYCOUNT) {
				/* controller never came back */
				acb->fw_flag = FW_DEADLOCK;
				pr_notice("arcmsr%d: waiting for hw bus reset"
					" return, RETRY TERMINATED!!\n",
					acb->host->host_no);
				return FAILED;
			}
			retry_count++;
			goto wait_reset_done;
		}
		/* controller is back: reinitialize and rearm watchdog */
		arcmsr_iop_init(acb);
		acb->fw_flag = FW_NORMAL;
		mod_timer(&acb->eternal_timer, jiffies +
			msecs_to_jiffies(6 * HZ));
		acb->acb_flags &= ~ACB_F_BUS_RESET;
		rtn = SUCCESS;
		pr_notice("arcmsr: scsi bus reset eh returns with success\n");
	} else {
		/* IOP abort path succeeded; no hardware reset needed */
		acb->acb_flags &= ~ACB_F_BUS_RESET;
		acb->fw_flag = FW_NORMAL;
		mod_timer(&acb->eternal_timer, jiffies +
			msecs_to_jiffies(6 * HZ));
		rtn = SUCCESS;
	}
	return rtn;
}
4724
/* Poll the given CCB until the controller reports it completed. */
static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb)
{
	return arcmsr_polling_ccbdone(acb, ccb);
}
4732
arcmsr_abort(struct scsi_cmnd * cmd)4733 static int arcmsr_abort(struct scsi_cmnd *cmd)
4734 {
4735 struct AdapterControlBlock *acb =
4736 (struct AdapterControlBlock *)cmd->device->host->hostdata;
4737 int i = 0;
4738 int rtn = FAILED;
4739 uint32_t intmask_org;
4740
4741 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
4742 return SUCCESS;
4743 printk(KERN_NOTICE
4744 "arcmsr%d: abort device command of scsi id = %d lun = %d\n",
4745 acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
4746 acb->acb_flags |= ACB_F_ABORT;
4747 acb->num_aborts++;
4748 /*
4749 ************************************************
4750 ** the all interrupt service routine is locked
4751 ** we need to handle it as soon as possible and exit
4752 ************************************************
4753 */
4754 if (!atomic_read(&acb->ccboutstandingcount)) {
4755 acb->acb_flags &= ~ACB_F_ABORT;
4756 return rtn;
4757 }
4758
4759 intmask_org = arcmsr_disable_outbound_ints(acb);
4760 for (i = 0; i < acb->maxFreeCCB; i++) {
4761 struct CommandControlBlock *ccb = acb->pccb_pool[i];
4762 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
4763 ccb->startdone = ARCMSR_CCB_ABORTED;
4764 rtn = arcmsr_abort_one_cmd(acb, ccb);
4765 break;
4766 }
4767 }
4768 acb->acb_flags &= ~ACB_F_ABORT;
4769 arcmsr_enable_outbound_ints(acb, intmask_org);
4770 return rtn;
4771 }
4772
arcmsr_info(struct Scsi_Host * host)4773 static const char *arcmsr_info(struct Scsi_Host *host)
4774 {
4775 struct AdapterControlBlock *acb =
4776 (struct AdapterControlBlock *) host->hostdata;
4777 static char buf[256];
4778 char *type;
4779 int raid6 = 1;
4780 switch (acb->pdev->device) {
4781 case PCI_DEVICE_ID_ARECA_1110:
4782 case PCI_DEVICE_ID_ARECA_1200:
4783 case PCI_DEVICE_ID_ARECA_1202:
4784 case PCI_DEVICE_ID_ARECA_1210:
4785 raid6 = 0;
4786 fallthrough;
4787 case PCI_DEVICE_ID_ARECA_1120:
4788 case PCI_DEVICE_ID_ARECA_1130:
4789 case PCI_DEVICE_ID_ARECA_1160:
4790 case PCI_DEVICE_ID_ARECA_1170:
4791 case PCI_DEVICE_ID_ARECA_1201:
4792 case PCI_DEVICE_ID_ARECA_1203:
4793 case PCI_DEVICE_ID_ARECA_1220:
4794 case PCI_DEVICE_ID_ARECA_1230:
4795 case PCI_DEVICE_ID_ARECA_1260:
4796 case PCI_DEVICE_ID_ARECA_1270:
4797 case PCI_DEVICE_ID_ARECA_1280:
4798 type = "SATA";
4799 break;
4800 case PCI_DEVICE_ID_ARECA_1214:
4801 case PCI_DEVICE_ID_ARECA_1380:
4802 case PCI_DEVICE_ID_ARECA_1381:
4803 case PCI_DEVICE_ID_ARECA_1680:
4804 case PCI_DEVICE_ID_ARECA_1681:
4805 case PCI_DEVICE_ID_ARECA_1880:
4806 case PCI_DEVICE_ID_ARECA_1883:
4807 case PCI_DEVICE_ID_ARECA_1884:
4808 type = "SAS/SATA";
4809 break;
4810 case PCI_DEVICE_ID_ARECA_1886_0:
4811 case PCI_DEVICE_ID_ARECA_1886:
4812 type = "NVMe/SAS/SATA";
4813 break;
4814 default:
4815 type = "unknown";
4816 raid6 = 0;
4817 break;
4818 }
4819 sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n",
4820 type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
4821 return buf;
4822 }
4823