1 /*
2  *    Disk Array driver for Compaq SMART2 Controllers
3  *    Copyright 1998 Compaq Computer Corporation
4  *
5  *    This program is free software; you can redistribute it and/or modify
6  *    it under the terms of the GNU General Public License as published by
7  *    the Free Software Foundation; either version 2 of the License, or
8  *    (at your option) any later version.
9  *
10  *    This program is distributed in the hope that it will be useful,
11  *    but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13  *    NON INFRINGEMENT.  See the GNU General Public License for more details.
14  *
15  *    You should have received a copy of the GNU General Public License
16  *    along with this program; if not, write to the Free Software
17  *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18  *
19  *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
20  *
21  */
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/pci.h>
25 #include <linux/bio.h>
26 #include <linux/interrupt.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
31 #include <linux/fs.h>
32 #include <linux/blkpg.h>
33 #include <linux/timer.h>
34 #include <linux/proc_fs.h>
35 #include <linux/seq_file.h>
36 #include <linux/init.h>
37 #include <linux/hdreg.h>
38 #include <linux/mutex.h>
39 #include <linux/spinlock.h>
40 #include <linux/blkdev.h>
41 #include <linux/genhd.h>
42 #include <linux/scatterlist.h>
43 #include <asm/uaccess.h>
44 #include <asm/io.h>
45 
46 
47 #define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
48 
49 #define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
50 #define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)
51 
52 /* Embedded module documentation macros - see modules.h */
53 /* Original author Chris Frantz - Compaq Computer Corporation */
54 MODULE_AUTHOR("Compaq Computer Corporation");
55 MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
56 MODULE_LICENSE("GPL");
57 
58 #include "cpqarray.h"
59 #include "ida_cmd.h"
60 #include "smart1,2.h"
61 #include "ida_ioctl.h"
62 
63 #define READ_AHEAD	128
64 #define NR_CMDS		128 /* This could probably go as high as ~400 */
65 
66 #define MAX_CTLR	8
67 #define CTLR_SHIFT	8
68 
69 #define CPQARRAY_DMA_MASK	0xFFFFFFFF	/* 32 bit DMA */
70 
71 static DEFINE_MUTEX(cpqarray_mutex);
72 static int nr_ctlr;
73 static ctlr_info_t *hba[MAX_CTLR];
74 
75 static int eisa[8];
76 
77 #define NR_PRODUCTS ARRAY_SIZE(products)
78 
79 /*  board_id = Subsystem Device ID & Vendor ID
80  *  product = Marketing Name for the board
81  *  access = Address of the struct of function pointers
82  */
83 static struct board_type products[] = {
84 	{ 0x0040110E, "IDA",			&smart1_access },
85 	{ 0x0140110E, "IDA-2",			&smart1_access },
86 	{ 0x1040110E, "IAES",			&smart1_access },
87 	{ 0x2040110E, "SMART",			&smart1_access },
88 	{ 0x3040110E, "SMART-2/E",		&smart2e_access },
89 	{ 0x40300E11, "SMART-2/P",		&smart2_access },
90 	{ 0x40310E11, "SMART-2SL",		&smart2_access },
91 	{ 0x40320E11, "Smart Array 3200",	&smart2_access },
92 	{ 0x40330E11, "Smart Array 3100ES",	&smart2_access },
93 	{ 0x40340E11, "Smart Array 221",	&smart2_access },
94 	{ 0x40400E11, "Integrated Array",	&smart4_access },
95 	{ 0x40480E11, "Compaq Raid LC2",        &smart4_access },
96 	{ 0x40500E11, "Smart Array 4200",	&smart4_access },
97 	{ 0x40510E11, "Smart Array 4250ES",	&smart4_access },
98 	{ 0x40580E11, "Smart Array 431",	&smart4_access },
99 };
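
/*
 * Note on board_id values: for the PCI boards the upper 16 bits hold the
 * PCI subsystem device ID and the lower 16 bits the vendor ID (0x0E11,
 * Compaq), matching the subvendor/subdevice pairs in
 * cpqarray_pci_device_id below.  For the EISA boards the value is the
 * 32-bit product ID read from the slot's EISA ID registers (see
 * cpqarray_eisa_detect()).
 */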
100 
101 /* define the PCI info for the PCI cards this driver can control */
102 static const struct pci_device_id cpqarray_pci_device_id[] =
103 {
104 	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
105 		0x0E11, 0x4058, 0, 0, 0},       /* SA431 */
106 	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
107 		0x0E11, 0x4051, 0, 0, 0},      /* SA4250ES */
108 	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
109 		0x0E11, 0x4050, 0, 0, 0},      /* SA4200 */
110 	{ PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
111 		0x0E11, 0x4048, 0, 0, 0},       /* LC2 */
112 	{ PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
113 		0x0E11, 0x4040, 0, 0, 0},      /* Integrated Array */
114 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
115 		0x0E11, 0x4034, 0, 0, 0},       /* SA 221 */
116 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
117 		0x0E11, 0x4033, 0, 0, 0},       /* SA 3100ES*/
118 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
119 		0x0E11, 0x4032, 0, 0, 0},       /* SA 3200*/
120 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
121 		0x0E11, 0x4031, 0, 0, 0},       /* SA 2SL*/
122 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
123 		0x0E11, 0x4030, 0, 0, 0},       /* SA 2P */
124 	{ 0 }
125 };
126 
127 MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
128 
129 static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
130 
131 /* Debug... */
132 #define DBG(s)	do { s } while(0)
133 /* Debug (general info)... */
134 #define DBGINFO(s) do { } while(0)
135 /* Debug Paranoid... */
136 #define DBGP(s)  do { } while(0)
137 /* Debug Extra Paranoid... */
138 #define DBGPX(s) do { } while(0)
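
/*
 * DBG() compiles its statement in unconditionally; DBGINFO(), DBGP() and
 * DBGPX() currently compile to nothing.  To enable one of the noisier
 * levels, change its body to "do { s } while(0)" like DBG() and rebuild.
 */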
139 
140 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
141 static void __iomem *remap_pci_mem(ulong base, ulong size);
142 static int cpqarray_eisa_detect(void);
143 static int pollcomplete(int ctlr);
144 static void getgeometry(int ctlr);
145 static void start_fwbk(int ctlr);
146 
147 static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
148 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
149 
150 static void free_hba(int i);
151 static int alloc_cpqarray_hba(void);
152 
153 static int sendcmd(
154 	__u8	cmd,
155 	int	ctlr,
156 	void	*buff,
157 	size_t	size,
158 	unsigned int blk,
159 	unsigned int blkcnt,
160 	unsigned int log_unit );
161 
162 static int ida_unlocked_open(struct block_device *bdev, fmode_t mode);
163 static int ida_release(struct gendisk *disk, fmode_t mode);
164 static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
165 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
166 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
167 
168 static void do_ida_request(struct request_queue *q);
169 static void start_io(ctlr_info_t *h);
170 
171 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
172 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
173 static inline void complete_command(cmdlist_t *cmd, int timeout);
174 
175 static irqreturn_t do_ida_intr(int irq, void *dev_id);
176 static void ida_timer(unsigned long tdata);
177 static int ida_revalidate(struct gendisk *disk);
178 static int revalidate_allvol(ctlr_info_t *host);
179 static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
180 
181 #ifdef CONFIG_PROC_FS
182 static void ida_procinit(int i);
183 #else
184 static void ida_procinit(int i) {}
185 #endif
186 
187 static inline drv_info_t *get_drv(struct gendisk *disk)
188 {
189 	return disk->private_data;
190 }
191 
192 static inline ctlr_info_t *get_host(struct gendisk *disk)
193 {
194 	return disk->queue->queuedata;
195 }
196 
197 
198 static const struct block_device_operations ida_fops  = {
199 	.owner		= THIS_MODULE,
200 	.open		= ida_unlocked_open,
201 	.release	= ida_release,
202 	.ioctl		= ida_ioctl,
203 	.getgeo		= ida_getgeo,
204 	.revalidate_disk= ida_revalidate,
205 };
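
/*
 * These are the entry points the block layer calls for the ida/c*d* device
 * nodes.  The open, release and ioctl paths are serialized against each
 * other with cpqarray_mutex in the wrappers below.
 */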
206 
207 
208 #ifdef CONFIG_PROC_FS
209 
210 static struct proc_dir_entry *proc_array;
211 static const struct file_operations ida_proc_fops;
212 
213 /*
214  * Get us a file in /proc/array that says something about each controller.
215  * Create /proc/array if it doesn't exist yet.
216  */
217 static void __init ida_procinit(int i)
218 {
219 	if (proc_array == NULL) {
220 		proc_array = proc_mkdir("driver/cpqarray", NULL);
221 		if (!proc_array) return;
222 	}
223 
224 	proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]);
225 }
226 
227 /*
228  * Report information about this controller.
229  */
230 static int ida_proc_show(struct seq_file *m, void *v)
231 {
232 	int i, ctlr;
233 	ctlr_info_t *h = (ctlr_info_t*)m->private;
234 	drv_info_t *drv;
235 #ifdef CPQ_PROC_PRINT_QUEUES
236 	cmdlist_t *c;
237 	unsigned long flags;
238 #endif
239 
240 	ctlr = h->ctlr;
241 	seq_printf(m, "%s:  Compaq %s Controller\n"
242 		"       Board ID: 0x%08lx\n"
243 		"       Firmware Revision: %c%c%c%c\n"
244 		"       Controller Sig: 0x%08lx\n"
245 		"       Memory Address: 0x%08lx\n"
246 		"       I/O Port: 0x%04x\n"
247 		"       IRQ: %d\n"
248 		"       Logical drives: %d\n"
249 		"       Physical drives: %d\n\n"
250 		"       Current Q depth: %d\n"
251 		"       Max Q depth since init: %d\n\n",
252 		h->devname,
253 		h->product_name,
254 		(unsigned long)h->board_id,
255 		h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
256 		(unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
257 		(unsigned int) h->io_mem_addr, (unsigned int)h->intr,
258 		h->log_drives, h->phys_drives,
259 		h->Qdepth, h->maxQsinceinit);
260 
261 	seq_puts(m, "Logical Drive Info:\n");
262 
263 	for(i=0; i<h->log_drives; i++) {
264 		drv = &h->drv[i];
265 		seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
266 				ctlr, i, drv->blk_size, drv->nr_blks);
267 	}
268 
269 #ifdef CPQ_PROC_PRINT_QUEUES
270 	spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
271 	seq_puts(m, "\nCurrent Queues:\n");
272 
273 	c = h->reqQ;
274 	seq_printf(m, "reqQ = %p", c);
275 	if (c) c=c->next;
276 	while(c && c != h->reqQ) {
277 		seq_printf(m, "->%p", c);
278 		c=c->next;
279 	}
280 
281 	c = h->cmpQ;
282 	seq_printf(m, "\ncmpQ = %p", c);
283 	if (c) c=c->next;
284 	while(c && c != h->cmpQ) {
285 		seq_printf(m, "->%p", c);
286 		c=c->next;
287 	}
288 
289 	seq_putc(m, '\n');
290 	spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
291 #endif
292 	seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n",
293 			h->nr_allocs, h->nr_frees);
294 	return 0;
295 }
296 
297 static int ida_proc_open(struct inode *inode, struct file *file)
298 {
299 	return single_open(file, ida_proc_show, PDE(inode)->data);
300 }
301 
302 static const struct file_operations ida_proc_fops = {
303 	.owner		= THIS_MODULE,
304 	.open		= ida_proc_open,
305 	.read		= seq_read,
306 	.llseek		= seq_lseek,
307 	.release	= single_release,
308 };
309 #endif /* CONFIG_PROC_FS */
310 
311 module_param_array(eisa, int, NULL, 0);
312 
313 static void release_io_mem(ctlr_info_t *c)
314 {
315 	/* if the I/O region was never reserved, do nothing */
316 	if( c->io_mem_addr == 0)
317 		return;
318 	release_region(c->io_mem_addr, c->io_mem_length);
319 	c->io_mem_addr = 0;
320 	c->io_mem_length = 0;
321 }
322 
323 static void __devexit cpqarray_remove_one(int i)
324 {
325 	int j;
326 	char buff[4];
327 
328 	/* sendcmd will turn off interrupt, and send the flush...
329 	 * To write all data in the battery backed cache to disks
330 	 * no data returned, but don't want to send NULL to sendcmd */
331 	if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
332 	{
333 		printk(KERN_WARNING "Unable to flush cache on controller %d\n",
334 				i);
335 	}
336 	free_irq(hba[i]->intr, hba[i]);
337 	iounmap(hba[i]->vaddr);
338 	unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
339 	del_timer(&hba[i]->timer);
340 	remove_proc_entry(hba[i]->devname, proc_array);
341 	pci_free_consistent(hba[i]->pci_dev,
342 			NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
343 			hba[i]->cmd_pool_dhandle);
344 	kfree(hba[i]->cmd_pool_bits);
345 	for(j = 0; j < NWD; j++) {
346 		if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
347 			del_gendisk(ida_gendisk[i][j]);
348 		put_disk(ida_gendisk[i][j]);
349 	}
350 	blk_cleanup_queue(hba[i]->queue);
351 	release_io_mem(hba[i]);
352 	free_hba(i);
353 }
354 
355 static void __devexit cpqarray_remove_one_pci(struct pci_dev *pdev)
356 {
357 	int i;
358 	ctlr_info_t *tmp_ptr;
359 
360 	if (pci_get_drvdata(pdev) == NULL) {
361 		printk( KERN_ERR "cpqarray: Unable to remove device \n");
362 		return;
363 	}
364 
365 	tmp_ptr = pci_get_drvdata(pdev);
366 	i = tmp_ptr->ctlr;
367 	if (hba[i] == NULL) {
368 		printk(KERN_ERR "cpqarray: controller %d appears to have"
369 			" already been removed\n", i);
370 		return;
371         }
372 	pci_set_drvdata(pdev, NULL);
373 
374 	cpqarray_remove_one(i);
375 }
376 
377 /* removing an instance that was not removed automatically..
378  * must be an eisa card.
379  */
380 static void __devexit cpqarray_remove_one_eisa(int i)
381 {
382 	if (hba[i] == NULL) {
383 		printk(KERN_ERR "cpqarray: controller %d appears to have"
384 			" already been removed\n", i);
385 		return;
386         }
387 	cpqarray_remove_one(i);
388 }
389 
390 /* pdev is NULL for eisa */
391 static int __devinit cpqarray_register_ctlr(int i, struct pci_dev *pdev)
392 {
393 	struct request_queue *q;
394 	int j;
395 
396 	/*
397 	 * register block devices
398 	 * Find disks and fill in structs
399 	 * Get an interrupt, set the Q depth and get into /proc
400 	 */
401 
402 	/* If this is successful it should ensure that we are the only */
403 	/* instance of the driver */
404 	if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
405 		goto Enomem4;
406 	}
407 	hba[i]->access.set_intr_mask(hba[i], 0);
408 	if (request_irq(hba[i]->intr, do_ida_intr,
409 		IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
410 	{
411 		printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
412 				hba[i]->intr, hba[i]->devname);
413 		goto Enomem3;
414 	}
415 
416 	for (j=0; j<NWD; j++) {
417 		ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
418 		if (!ida_gendisk[i][j])
419 			goto Enomem2;
420 	}
421 
422 	hba[i]->cmd_pool = pci_alloc_consistent(
423 		hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
424 		&(hba[i]->cmd_pool_dhandle));
425 	hba[i]->cmd_pool_bits = kcalloc(
426 		DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
427 		GFP_KERNEL);
428 
429 	if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
430 			goto Enomem1;
431 
432 	memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
433 	printk(KERN_INFO "cpqarray: Finding drives on %s",
434 		hba[i]->devname);
435 
436 	spin_lock_init(&hba[i]->lock);
437 	q = blk_init_queue(do_ida_request, &hba[i]->lock);
438 	if (!q)
439 		goto Enomem1;
440 
441 	hba[i]->queue = q;
442 	q->queuedata = hba[i];
443 
444 	getgeometry(i);
445 	start_fwbk(i);
446 
447 	ida_procinit(i);
448 
449 	if (pdev)
450 		blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
451 
452 	/* This is a hardware imposed limit. */
453 	blk_queue_max_segments(q, SG_MAX);
454 
455 	init_timer(&hba[i]->timer);
456 	hba[i]->timer.expires = jiffies + IDA_TIMER;
457 	hba[i]->timer.data = (unsigned long)hba[i];
458 	hba[i]->timer.function = ida_timer;
459 	add_timer(&hba[i]->timer);
460 
461 	/* Enable IRQ now that spinlock and rate limit timer are set up */
462 	hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
463 
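	/*
	 * Each controller owns one block major (COMPAQ_SMART2_MAJOR + i);
	 * logical drive j gets minors j<<NWD_SHIFT through
	 * (j<<NWD_SHIFT) + (1<<NWD_SHIFT) - 1, leaving room for its
	 * partitions.
	 */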
464 	for(j=0; j<NWD; j++) {
465 		struct gendisk *disk = ida_gendisk[i][j];
466 		drv_info_t *drv = &hba[i]->drv[j];
467 		sprintf(disk->disk_name, "ida/c%dd%d", i, j);
468 		disk->major = COMPAQ_SMART2_MAJOR + i;
469 		disk->first_minor = j<<NWD_SHIFT;
470 		disk->fops = &ida_fops;
471 		if (j && !drv->nr_blks)
472 			continue;
473 		blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
474 		set_capacity(disk, drv->nr_blks);
475 		disk->queue = hba[i]->queue;
476 		disk->private_data = drv;
477 		add_disk(disk);
478 	}
479 
480 	/* done ! */
481 	return(i);
482 
483 Enomem1:
484 	nr_ctlr = i;
485 	kfree(hba[i]->cmd_pool_bits);
486 	if (hba[i]->cmd_pool)
487 		pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t),
488 				    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
489 Enomem2:
490 	while (j--) {
491 		put_disk(ida_gendisk[i][j]);
492 		ida_gendisk[i][j] = NULL;
493 	}
494 	free_irq(hba[i]->intr, hba[i]);
495 Enomem3:
496 	unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
497 Enomem4:
498 	if (pdev)
499 		pci_set_drvdata(pdev, NULL);
500 	release_io_mem(hba[i]);
501 	free_hba(i);
502 
503 	printk(KERN_ERR "cpqarray: out of memory\n");
504 
505 	return -1;
506 }
507 
508 static int __devinit cpqarray_init_one(struct pci_dev *pdev,
509 	const struct pci_device_id *ent)
510 {
511 	int i;
512 
513 	printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
514 			" bus %d dev %d func %d\n",
515 			pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
516 			PCI_FUNC(pdev->devfn));
517 	i = alloc_cpqarray_hba();
518 	if( i < 0 )
519 		return (-1);
520 	memset(hba[i], 0, sizeof(ctlr_info_t));
521 	sprintf(hba[i]->devname, "ida%d", i);
522 	hba[i]->ctlr = i;
523 	/* Initialize the pdev driver private data */
524 	pci_set_drvdata(pdev, hba[i]);
525 
526 	if (cpqarray_pci_init(hba[i], pdev) != 0) {
527 		pci_set_drvdata(pdev, NULL);
528 		release_io_mem(hba[i]);
529 		free_hba(i);
530 		return -1;
531 	}
532 
533 	return (cpqarray_register_ctlr(i, pdev));
534 }
535 
536 static struct pci_driver cpqarray_pci_driver = {
537 	.name = "cpqarray",
538 	.probe = cpqarray_init_one,
539 	.remove = __devexit_p(cpqarray_remove_one_pci),
540 	.id_table = cpqarray_pci_device_id,
541 };
542 
543 /*
544  *  This is it.  Find all the controllers and register them.
545  *  returns the number of block devices registered.
546  */
547 static int __init cpqarray_init(void)
548 {
549 	int num_cntlrs_reg = 0;
550 	int i;
551 	int rc = 0;
552 
553 	/* detect controllers */
554 	printk(DRIVER_NAME "\n");
555 
556 	rc = pci_register_driver(&cpqarray_pci_driver);
557 	if (rc)
558 		return rc;
559 	cpqarray_eisa_detect();
560 
561 	for (i=0; i < MAX_CTLR; i++) {
562 		if (hba[i] != NULL)
563 			num_cntlrs_reg++;
564 	}
565 
566 	if (num_cntlrs_reg)
567 		return 0;
568 	else {
569 		pci_unregister_driver(&cpqarray_pci_driver);
570 		return -ENODEV;
571 	}
572 }
573 
574 /* Function to find the first free pointer into our hba[] array */
575 /* Returns -1 if no free entries are left.  */
576 static int alloc_cpqarray_hba(void)
577 {
578 	int i;
579 
580 	for(i=0; i< MAX_CTLR; i++) {
581 		if (hba[i] == NULL) {
582 			hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
583 			if(hba[i]==NULL) {
584 				printk(KERN_ERR "cpqarray: out of memory.\n");
585 				return (-1);
586 			}
587 			return (i);
588 		}
589 	}
590 	printk(KERN_WARNING "cpqarray: This driver supports a maximum"
591 		" of 8 controllers.\n");
592 	return(-1);
593 }
594 
595 static void free_hba(int i)
596 {
597 	kfree(hba[i]);
598 	hba[i]=NULL;
599 }
600 
601 /*
602  * Find the IO address of the controller, its IRQ and so forth.  Fill
603  * in some basic stuff into the ctlr_info_t structure.
604  */
605 static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
606 {
607 	ushort vendor_id, device_id, command;
608 	unchar cache_line_size, latency_timer;
609 	unchar irq, revision;
610 	unsigned long addr[6];
611 	__u32 board_id;
612 
613 	int i;
614 
615 	c->pci_dev = pdev;
616 	pci_set_master(pdev);
617 	if (pci_enable_device(pdev)) {
618 		printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
619 		return -1;
620 	}
621 	vendor_id = pdev->vendor;
622 	device_id = pdev->device;
623 	revision  = pdev->revision;
624 	irq = pdev->irq;
625 
626 	for(i=0; i<6; i++)
627 		addr[i] = pci_resource_start(pdev, i);
628 
629 	if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
630 	{
631 		printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
632 		return -1;
633 	}
634 
635 	pci_read_config_word(pdev, PCI_COMMAND, &command);
636 	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
637 	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
638 
639 	pci_read_config_dword(pdev, 0x2c, &board_id);
640 
641 	/* check to see if controller has been disabled */
642 	if(!(command & 0x02)) {
643 		printk(KERN_WARNING
644 			"cpqarray: controller appears to be disabled\n");
645 		return(-1);
646 	}
647 
648 DBGINFO(
649 	printk("vendor_id = %x\n", vendor_id);
650 	printk("device_id = %x\n", device_id);
651 	printk("command = %x\n", command);
652 	for(i=0; i<6; i++)
653 		printk("addr[%d] = %lx\n", i, addr[i]);
654 	printk("revision = %x\n", revision);
655 	printk("irq = %x\n", irq);
656 	printk("cache_line_size = %x\n", cache_line_size);
657 	printk("latency_timer = %x\n", latency_timer);
658 	printk("board_id = %x\n", board_id);
659 );
660 
661 	c->intr = irq;
662 
663 	for(i=0; i<6; i++) {
664 		if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
665 		{ /* IO space */
666 			c->io_mem_addr = addr[i];
667 			c->io_mem_length = pci_resource_end(pdev, i)
668 				- pci_resource_start(pdev, i) + 1;
669 			if(!request_region( c->io_mem_addr, c->io_mem_length,
670 				"cpqarray"))
671 			{
672 				printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
673 				c->io_mem_addr = 0;
674 				c->io_mem_length = 0;
675 			}
676 			break;
677 		}
678 	}
679 
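	/*
	 * Find the first memory-mapped BAR; the controller's command FIFO
	 * registers live there and are ioremap()ed (128 bytes) below.
	 */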
680 	c->paddr = 0;
681 	for(i=0; i<6; i++)
682 		if (!(pci_resource_flags(pdev, i) &
683 				PCI_BASE_ADDRESS_SPACE_IO)) {
684 			c->paddr = pci_resource_start (pdev, i);
685 			break;
686 		}
687 	if (!c->paddr)
688 		return -1;
689 	c->vaddr = remap_pci_mem(c->paddr, 128);
690 	if (!c->vaddr)
691 		return -1;
692 	c->board_id = board_id;
693 
694 	for(i=0; i<NR_PRODUCTS; i++) {
695 		if (board_id == products[i].board_id) {
696 			c->product_name = products[i].product_name;
697 			c->access = *(products[i].access);
698 			break;
699 		}
700 	}
701 	if (i == NR_PRODUCTS) {
702 		printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
703 			" to access the SMART Array controller %08lx\n",
704 				(unsigned long)board_id);
705 		return -1;
706 	}
707 
708 	return 0;
709 }
710 
711 /*
712  * Map (physical) PCI mem into (virtual) kernel space
713  */
714 static void __iomem *remap_pci_mem(ulong base, ulong size)
715 {
716         ulong page_base        = ((ulong) base) & PAGE_MASK;
717         ulong page_offs        = ((ulong) base) - page_base;
718         void __iomem *page_remapped    = ioremap(page_base, page_offs+size);
719 
720         return (page_remapped ? (page_remapped + page_offs) : NULL);
721 }
722 
723 #ifndef MODULE
724 /*
725  * Config string is a comma separated set of i/o addresses of EISA cards.
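 * For example (hypothetical addresses), booting with "smart2=0x4000,0x5000"
 * makes the driver probe for EISA controllers at I/O bases 0x4000 and 0x5000.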
726  */
727 static int cpqarray_setup(char *str)
728 {
729 	int i, ints[9];
730 
731 	(void)get_options(str, ARRAY_SIZE(ints), ints);
732 
733 	for(i=0; i<ints[0] && i<8; i++)
734 		eisa[i] = ints[i+1];
735 	return 1;
736 }
737 
738 __setup("smart2=", cpqarray_setup);
739 
740 #endif
741 
742 /*
743  * Find an EISA controller's signature.  Set up an hba if we find it.
744  */
745 static int __devinit cpqarray_eisa_detect(void)
746 {
747 	int i=0, j;
748 	__u32 board_id;
749 	int intr;
750 	int ctlr;
751 	int num_ctlr = 0;
752 
753 	while(i<8 && eisa[i]) {
754 		ctlr = alloc_cpqarray_hba();
755 		if(ctlr == -1)
756 			break;
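		/*
		 * The 4-byte product ID lives in the slot's EISA ID
		 * registers at I/O base + 0xC80; compare it against the
		 * known boards in products[].
		 */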
757 		board_id = inl(eisa[i]+0xC80);
758 		for(j=0; j < NR_PRODUCTS; j++)
759 			if (board_id == products[j].board_id)
760 				break;
761 
762 		if (j == NR_PRODUCTS) {
763 			printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
764 				" to access the SMART Array controller %08lx\n", (unsigned long)board_id);
765 			continue;
766 		}
767 
768 		memset(hba[ctlr], 0, sizeof(ctlr_info_t));
769 		hba[ctlr]->io_mem_addr = eisa[i];
770 		hba[ctlr]->io_mem_length = 0x7FF;
771 		if(!request_region(hba[ctlr]->io_mem_addr,
772 				hba[ctlr]->io_mem_length,
773 				"cpqarray"))
774 		{
775 			printk(KERN_WARNING "cpqarray: I/O range already in "
776 					"use addr = %lx length = %ld\n",
777 					hba[ctlr]->io_mem_addr,
778 					hba[ctlr]->io_mem_length);
779 			free_hba(ctlr);
780 			continue;
781 		}
782 
783 		/*
784 		 * Read the config register to find our interrupt
785 		 */
786 		intr = inb(eisa[i]+0xCC0) >> 4;
787 		if (intr & 1) intr = 11;
788 		else if (intr & 2) intr = 10;
789 		else if (intr & 4) intr = 14;
790 		else if (intr & 8) intr = 15;
791 
792 		hba[ctlr]->intr = intr;
793 		sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
794 		hba[ctlr]->product_name = products[j].product_name;
795 		hba[ctlr]->access = *(products[j].access);
796 		hba[ctlr]->ctlr = ctlr;
797 		hba[ctlr]->board_id = board_id;
798 		hba[ctlr]->pci_dev = NULL; /* not PCI */
799 
800 DBGINFO(
801 	printk("i = %d, j = %d\n", i, j);
802 	printk("irq = %x\n", intr);
803 	printk("product name = %s\n", products[j].product_name);
804 	printk("board_id = %x\n", board_id);
805 );
806 
807 		num_ctlr++;
808 		i++;
809 
810 		if (cpqarray_register_ctlr(ctlr, NULL) == -1)
811 			printk(KERN_WARNING
812 				"cpqarray: Can't register EISA controller %d\n",
813 				ctlr);
814 
815 	}
816 
817 	return num_ctlr;
818 }
819 
820 /*
821  * Open.  Make sure the device is really there.
822  */
823 static int ida_open(struct block_device *bdev, fmode_t mode)
824 {
825 	drv_info_t *drv = get_drv(bdev->bd_disk);
826 	ctlr_info_t *host = get_host(bdev->bd_disk);
827 
828 	DBGINFO(printk("ida_open %s\n", bdev->bd_disk->disk_name));
829 	/*
830 	 * Root is allowed to open raw volume zero even if it's not configured
831 	 * so array config can still work.  I don't think I really like this,
832 	 * but I'm already using way too many device nodes to claim another one
833 	 * for "raw controller".
834 	 */
835 	if (!drv->nr_blks) {
836 		if (!capable(CAP_SYS_RAWIO))
837 			return -ENXIO;
838 		if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
839 			return -ENXIO;
840 	}
841 	host->usage_count++;
842 	return 0;
843 }
844 
845 static int ida_unlocked_open(struct block_device *bdev, fmode_t mode)
846 {
847 	int ret;
848 
849 	mutex_lock(&cpqarray_mutex);
850 	ret = ida_open(bdev, mode);
851 	mutex_unlock(&cpqarray_mutex);
852 
853 	return ret;
854 }
855 
856 /*
857  * Close.  Sync first.
858  */
859 static int ida_release(struct gendisk *disk, fmode_t mode)
860 {
861 	ctlr_info_t *host;
862 
863 	mutex_lock(&cpqarray_mutex);
864 	host = get_host(disk);
865 	host->usage_count--;
866 	mutex_unlock(&cpqarray_mutex);
867 
868 	return 0;
869 }
870 
871 /*
872  * Enqueuing and dequeuing functions for cmdlists.
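 *
 * Both queues (reqQ and cmpQ) are circular doubly linked lists threaded
 * through the cmdlist_t next/prev pointers, with *Qptr pointing at the
 * head.  addQ() links c in just before the head (i.e. at the tail) and
 * removeQ() unlinks c, clearing *Qptr when the last element is removed.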
873  */
874 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
875 {
876 	if (*Qptr == NULL) {
877 		*Qptr = c;
878 		c->next = c->prev = c;
879 	} else {
880 		c->prev = (*Qptr)->prev;
881 		c->next = (*Qptr);
882 		(*Qptr)->prev->next = c;
883 		(*Qptr)->prev = c;
884 	}
885 }
886 
887 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
888 {
889 	if (c && c->next != c) {
890 		if (*Qptr == c) *Qptr = c->next;
891 		c->prev->next = c->next;
892 		c->next->prev = c->prev;
893 	} else {
894 		*Qptr = NULL;
895 	}
896 	return c;
897 }
898 
899 /*
900  * Get a request and submit it to the controller.
901  * This routine needs to grab all the requests it possibly can from the
902  * req Q and submit them.  Interrupts are off (and need to be off) when you
903  * are in here (either via the dummy do_ida_request functions or by being
904  * called from the interrupt handler).
905  */
906 static void do_ida_request(struct request_queue *q)
907 {
908 	ctlr_info_t *h = q->queuedata;
909 	cmdlist_t *c;
910 	struct request *creq;
911 	struct scatterlist tmp_sg[SG_MAX];
912 	int i, dir, seg;
913 
914 queue_next:
915 	creq = blk_peek_request(q);
916 	if (!creq)
917 		goto startio;
918 
919 	BUG_ON(creq->nr_phys_segments > SG_MAX);
920 
921 	if ((c = cmd_alloc(h,1)) == NULL)
922 		goto startio;
923 
924 	blk_start_request(creq);
925 
926 	c->ctlr = h->ctlr;
927 	c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
928 	c->hdr.size = sizeof(rblk_t) >> 2;
929 	c->size += sizeof(rblk_t);
930 
931 	c->req.hdr.blk = blk_rq_pos(creq);
932 	c->rq = creq;
933 DBGPX(
934 	printk("sector=%d, nr_sectors=%u\n",
935 	       blk_rq_pos(creq), blk_rq_sectors(creq));
936 );
937 	sg_init_table(tmp_sg, SG_MAX);
938 	seg = blk_rq_map_sg(q, creq, tmp_sg);
939 
940 	/* Now do all the DMA Mappings */
941 	if (rq_data_dir(creq) == READ)
942 		dir = PCI_DMA_FROMDEVICE;
943 	else
944 		dir = PCI_DMA_TODEVICE;
945 	for( i=0; i < seg; i++)
946 	{
947 		c->req.sg[i].size = tmp_sg[i].length;
948 		c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
949 						 sg_page(&tmp_sg[i]),
950 						 tmp_sg[i].offset,
951 						 tmp_sg[i].length, dir);
952 	}
953 DBGPX(	printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
954 	c->req.hdr.sg_cnt = seg;
955 	c->req.hdr.blk_cnt = blk_rq_sectors(creq);
956 	c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
957 	c->type = CMD_RWREQ;
958 
959 	/* Put the request on the tail of the request queue */
960 	addQ(&h->reqQ, c);
961 	h->Qdepth++;
962 	if (h->Qdepth > h->maxQsinceinit)
963 		h->maxQsinceinit = h->Qdepth;
964 
965 	goto queue_next;
966 
967 startio:
968 	start_io(h);
969 }
970 
971 /*
972  * start_io submits everything on a controller's request queue
973  * and moves it to the completion queue.
974  *
975  * Interrupts had better be off if you're in here
976  */
977 static void start_io(ctlr_info_t *h)
978 {
979 	cmdlist_t *c;
980 
981 	while((c = h->reqQ) != NULL) {
982 		/* Can't do anything if we're busy */
983 		if (h->access.fifo_full(h) == 0)
984 			return;
985 
986 		/* Get the first entry from the request Q */
987 		removeQ(&h->reqQ, c);
988 		h->Qdepth--;
989 
990 		/* Tell the controller to do our bidding */
991 		h->access.submit_command(h, c);
992 
993 		/* Get onto the completion Q */
994 		addQ(&h->cmpQ, c);
995 	}
996 }
997 
998 /*
999  * Mark all buffers that cmd was responsible for
1000  */
1001 static inline void complete_command(cmdlist_t *cmd, int timeout)
1002 {
1003 	struct request *rq = cmd->rq;
1004 	int error = 0;
1005 	int i, ddir;
1006 
1007 	if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
1008 	   (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
1009 		printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
1010 				cmd->ctlr, cmd->hdr.unit);
1011 		hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
1012 	}
1013 	if (cmd->req.hdr.rcode & RCODE_FATAL) {
1014 		printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
1015 				cmd->ctlr, cmd->hdr.unit);
1016 		error = -EIO;
1017 	}
1018 	if (cmd->req.hdr.rcode & RCODE_INVREQ) {
1019 		printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
1020 				cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
1021 				cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
1022 				cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
1023 		error = -EIO;
1024 	}
1025 	if (timeout)
1026 		error = -EIO;
1027 	/* unmap the DMA mapping for all the scatter gather elements */
1028 	if (cmd->req.hdr.cmd == IDA_READ)
1029 		ddir = PCI_DMA_FROMDEVICE;
1030 	else
1031 		ddir = PCI_DMA_TODEVICE;
1032         for(i=0; i<cmd->req.hdr.sg_cnt; i++)
1033                 pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
1034 				cmd->req.sg[i].size, ddir);
1035 
1036 	DBGPX(printk("Done with %p\n", rq););
1037 	__blk_end_request_all(rq, error);
1038 }
1039 
1040 /*
1041  *  The controller will interrupt us upon completion of commands.
1042  *  Find the command on the completion queue, remove it, tell the OS and
1043  *  try to queue up more IO
1044  */
1045 static irqreturn_t do_ida_intr(int irq, void *dev_id)
1046 {
1047 	ctlr_info_t *h = dev_id;
1048 	cmdlist_t *c;
1049 	unsigned long istat;
1050 	unsigned long flags;
1051 	__u32 a,a1;
1052 
1053 	istat = h->access.intr_pending(h);
1054 	/* Is this interrupt for us? */
1055 	if (istat == 0)
1056 		return IRQ_NONE;
1057 
1058 	/*
1059 	 * If there are completed commands in the completion queue,
1060 	 * we had better do something about it.
1061 	 */
1062 	spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
1063 	if (istat & FIFO_NOT_EMPTY) {
1064 		while((a = h->access.command_completed(h))) {
1065 			a1 = a; a &= ~3;
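			/*
			 * The controller hands back the command's bus
			 * address with status flags in the low two bits;
			 * mask them off (a) but keep the raw tag (a1) so
			 * an error can still be detected below when the
			 * return code is 0.
			 */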
1066 			if ((c = h->cmpQ) == NULL)
1067 			{
1068 				printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
1069 				continue;
1070 			}
1071 			while(c->busaddr != a) {
1072 				c = c->next;
1073 				if (c == h->cmpQ)
1074 					break;
1075 			}
1076 			/*
1077 			 * If we've found the command, take it off the
1078 			 * completion Q and free it
1079 			 */
1080 			if (c->busaddr == a) {
1081 				removeQ(&h->cmpQ, c);
1082 				/*  Check for invalid command.
1083                                  *  Controller returns command error,
1084                                  *  But rcode = 0.
1085                                  */
1086 
1087 				if((a1 & 0x03) && (c->req.hdr.rcode == 0))
1088                                 {
1089                                 	c->req.hdr.rcode = RCODE_INVREQ;
1090                                 }
1091 				if (c->type == CMD_RWREQ) {
1092 					complete_command(c, 0);
1093 					cmd_free(h, c, 1);
1094 				} else if (c->type == CMD_IOCTL_PEND) {
1095 					c->type = CMD_IOCTL_DONE;
1096 				}
1097 				continue;
1098 			}
1099 		}
1100 	}
1101 
1102 	/*
1103 	 * See if we can queue up some more IO
1104 	 */
1105 	do_ida_request(h->queue);
1106 	spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
1107 	return IRQ_HANDLED;
1108 }
1109 
1110 /*
1111  * This timer was for timing out requests that haven't happened after
1112  * IDA_TIMEOUT.  That wasn't such a good idea.  This timer is used to
1113  * reset a flags structure so we don't flood the user with
1114  * "Non-Fatal error" messages.
1115  */
1116 static void ida_timer(unsigned long tdata)
1117 {
1118 	ctlr_info_t *h = (ctlr_info_t*)tdata;
1119 
1120 	h->timer.expires = jiffies + IDA_TIMER;
1121 	add_timer(&h->timer);
1122 	h->misc_tflags = 0;
1123 }
1124 
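/*
 * Report a BIOS-style geometry for fdisk and friends.  If the controller
 * did not supply one, fake 255 heads x 63 sectors and derive the cylinder
 * count from the drive's block count.
 */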
1125 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1126 {
1127 	drv_info_t *drv = get_drv(bdev->bd_disk);
1128 
1129 	if (drv->cylinders) {
1130 		geo->heads = drv->heads;
1131 		geo->sectors = drv->sectors;
1132 		geo->cylinders = drv->cylinders;
1133 	} else {
1134 		geo->heads = 0xff;
1135 		geo->sectors = 0x3f;
1136 		geo->cylinders = drv->nr_blks / (0xff*0x3f);
1137 	}
1138 
1139 	return 0;
1140 }
1141 
1142 /*
1143  *  ida_ioctl does some miscellaneous stuff like reporting drive geometry,
1144  *  setting readahead and submitting commands from userspace to the controller.
1145  */
1146 static int ida_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
1147 {
1148 	drv_info_t *drv = get_drv(bdev->bd_disk);
1149 	ctlr_info_t *host = get_host(bdev->bd_disk);
1150 	int error;
1151 	ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
1152 	ida_ioctl_t *my_io;
1153 
1154 	switch(cmd) {
1155 	case IDAGETDRVINFO:
1156 		if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
1157 			return -EFAULT;
1158 		return 0;
1159 	case IDAPASSTHRU:
1160 		if (!capable(CAP_SYS_RAWIO))
1161 			return -EPERM;
1162 		my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
1163 		if (!my_io)
1164 			return -ENOMEM;
1165 		error = -EFAULT;
1166 		if (copy_from_user(my_io, io, sizeof(*my_io)))
1167 			goto out_passthru;
1168 		error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
1169 		if (error)
1170 			goto out_passthru;
1171 		error = -EFAULT;
1172 		if (copy_to_user(io, my_io, sizeof(*my_io)))
1173 			goto out_passthru;
1174 		error = 0;
1175 out_passthru:
1176 		kfree(my_io);
1177 		return error;
1178 	case IDAGETCTLRSIG:
1179 		if (!arg) return -EINVAL;
1180 		if (put_user(host->ctlr_sig, (int __user *)arg))
1181 			return -EFAULT;
1182 		return 0;
1183 	case IDAREVALIDATEVOLS:
1184 		if (MINOR(bdev->bd_dev) != 0)
1185 			return -ENXIO;
1186 		return revalidate_allvol(host);
1187 	case IDADRIVERVERSION:
1188 		if (!arg) return -EINVAL;
1189 		if (put_user(DRIVER_VERSION, (unsigned long __user *)arg))
1190 			return -EFAULT;
1191 		return 0;
1192 	case IDAGETPCIINFO:
1193 	{
1194 
1195 		ida_pci_info_struct pciinfo;
1196 
1197 		if (!arg) return -EINVAL;
1198 		pciinfo.bus = host->pci_dev->bus->number;
1199 		pciinfo.dev_fn = host->pci_dev->devfn;
1200 		pciinfo.board_id = host->board_id;
1201 		if(copy_to_user((void __user *) arg, &pciinfo,
1202 			sizeof( ida_pci_info_struct)))
1203 				return -EFAULT;
1204 		return(0);
1205 	}
1206 
1207 	default:
1208 		return -EINVAL;
1209 	}
1210 
1211 }
1212 
1213 static int ida_ioctl(struct block_device *bdev, fmode_t mode,
1214 			     unsigned int cmd, unsigned long param)
1215 {
1216 	int ret;
1217 
1218 	mutex_lock(&cpqarray_mutex);
1219 	ret = ida_locked_ioctl(bdev, mode, cmd, param);
1220 	mutex_unlock(&cpqarray_mutex);
1221 
1222 	return ret;
1223 }
1224 
1225 /*
1226  * ida_ctlr_ioctl is for passing commands to the controller from userspace.
1227  * The command block (io) has already been copied to kernel space for us,
1228  * however, any elements in the sglist need to be copied to kernel space
1229  * or copied back to userspace.
1230  *
1231  * Only root may perform a controller passthru command, however I'm not doing
1232  * any serious sanity checking on the arguments.  Doing an IDA_WRITE_MEDIA and
1233  * putting a 64M buffer in the sglist is probably a *bad* idea.
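 *
 * A userspace tool reaches this path by filling in an ida_ioctl_t (cmd,
 * blk, blk_cnt and a single sg entry) and issuing IDAPASSTHRU on an open
 * ida/c*d* node; see ida_locked_ioctl() above.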
1234  */
1235 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
1236 {
1237 	int ctlr = h->ctlr;
1238 	cmdlist_t *c;
1239 	void *p = NULL;
1240 	unsigned long flags;
1241 	int error;
1242 
1243 	if ((c = cmd_alloc(h, 0)) == NULL)
1244 		return -ENOMEM;
1245 	c->ctlr = ctlr;
1246 	c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1247 	c->hdr.size = sizeof(rblk_t) >> 2;
1248 	c->size += sizeof(rblk_t);
1249 
1250 	c->req.hdr.cmd = io->cmd;
1251 	c->req.hdr.blk = io->blk;
1252 	c->req.hdr.blk_cnt = io->blk_cnt;
1253 	c->type = CMD_IOCTL_PEND;
1254 
1255 	/* Pre submit processing */
1256 	switch(io->cmd) {
1257 	case PASSTHRU_A:
1258 		p = memdup_user(io->sg[0].addr, io->sg[0].size);
1259 		if (IS_ERR(p)) {
1260 			error = PTR_ERR(p);
1261 			cmd_free(h, c, 0);
1262 			return error;
1263 		}
1264 		c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
1265 				sizeof(ida_ioctl_t),
1266 				PCI_DMA_BIDIRECTIONAL);
1267 		c->req.sg[0].size = io->sg[0].size;
1268 		c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1269 			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1270 		c->req.hdr.sg_cnt = 1;
1271 		break;
1272 	case IDA_READ:
1273 	case READ_FLASH_ROM:
1274 	case SENSE_CONTROLLER_PERFORMANCE:
1275 		p = kmalloc(io->sg[0].size, GFP_KERNEL);
1276 		if (!p)
1277 		{
1278                         error = -ENOMEM;
1279                         cmd_free(h, c, 0);
1280                         return(error);
1281                 }
1282 
1283 		c->req.sg[0].size = io->sg[0].size;
1284 		c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1285 			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1286 		c->req.hdr.sg_cnt = 1;
1287 		break;
1288 	case IDA_WRITE:
1289 	case IDA_WRITE_MEDIA:
1290 	case DIAG_PASS_THRU:
1291 	case COLLECT_BUFFER:
1292 	case WRITE_FLASH_ROM:
1293 		p = memdup_user(io->sg[0].addr, io->sg[0].size);
1294 		if (IS_ERR(p)) {
1295 			error = PTR_ERR(p);
1296 			cmd_free(h, c, 0);
1297 			return error;
1298                 }
1299 		c->req.sg[0].size = io->sg[0].size;
1300 		c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1301 			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1302 		c->req.hdr.sg_cnt = 1;
1303 		break;
1304 	default:
1305 		c->req.sg[0].size = sizeof(io->c);
1306 		c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
1307 			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1308 		c->req.hdr.sg_cnt = 1;
1309 	}
1310 
1311 	/* Put the request on the tail of the request queue */
1312 	spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1313 	addQ(&h->reqQ, c);
1314 	h->Qdepth++;
1315 	start_io(h);
1316 	spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1317 
1318 	/* Wait for completion */
1319 	while(c->type != CMD_IOCTL_DONE)
1320 		schedule();
1321 
1322 	/* Unmap the DMA  */
1323 	pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
1324 		PCI_DMA_BIDIRECTIONAL);
1325 	/* Post submit processing */
1326 	switch(io->cmd) {
1327 	case PASSTHRU_A:
1328 		pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1329                                 sizeof(ida_ioctl_t),
1330                                 PCI_DMA_BIDIRECTIONAL);
1331 	case IDA_READ:
1332 	case DIAG_PASS_THRU:
1333 	case SENSE_CONTROLLER_PERFORMANCE:
1334 	case READ_FLASH_ROM:
1335 		if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
1336 			kfree(p);
1337 			return -EFAULT;
1338 		}
1339 		/* fall through and free p */
1340 	case IDA_WRITE:
1341 	case IDA_WRITE_MEDIA:
1342 	case COLLECT_BUFFER:
1343 	case WRITE_FLASH_ROM:
1344 		kfree(p);
1345 		break;
1346 	default:;
1347 		/* Nothing to do */
1348 	}
1349 
1350 	io->rcode = c->req.hdr.rcode;
1351 	cmd_free(h, c, 0);
1352 	return(0);
1353 }
1354 
1355 /*
1356  * Commands are pre-allocated in a large block.  Here we use a simple bitmap
1357  * scheme to suballocate them to the driver.  Operations that are not time
1358  * critical (and can wait for kmalloc and possibly sleep) can pass 0 for
1359  * get_from_pool to get a command freshly allocated with pci_alloc_consistent().
1360  */
1361 static cmdlist_t *cmd_alloc(ctlr_info_t *h, int get_from_pool)
1362 {
1363 	cmdlist_t * c;
1364 	int i;
1365 	dma_addr_t cmd_dhandle;
1366 
1367 	if (!get_from_pool) {
1368 		c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
1369 			sizeof(cmdlist_t), &cmd_dhandle);
1370 		if(c==NULL)
1371 			return NULL;
1372 	} else {
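		/*
		 * Claim a free slot from the pre-allocated pool: scan the
		 * bitmap for a clear bit and set it atomically, retrying if
		 * another context grabbed the same slot first.
		 */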
1373 		do {
1374 			i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
1375 			if (i == NR_CMDS)
1376 				return NULL;
1377 		} while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
1378 		c = h->cmd_pool + i;
1379 		cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
1380 		h->nr_allocs++;
1381 	}
1382 
1383 	memset(c, 0, sizeof(cmdlist_t));
1384 	c->busaddr = cmd_dhandle;
1385 	return c;
1386 }
1387 
1388 static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
1389 {
1390 	int i;
1391 
1392 	if (!got_from_pool) {
1393 		pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
1394 			c->busaddr);
1395 	} else {
1396 		i = c - h->cmd_pool;
1397 		clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
1398 		h->nr_frees++;
1399 	}
1400 }
1401 
1402 /***********************************************************************
1403     name:        sendcmd
1404     Send a command to an IDA using the memory mapped FIFO interface
1405     and wait for it to complete.
1406     This routine should only be called at init time.
1407 ***********************************************************************/
1408 static int sendcmd(
1409 	__u8	cmd,
1410 	int	ctlr,
1411 	void	*buff,
1412 	size_t	size,
1413 	unsigned int blk,
1414 	unsigned int blkcnt,
1415 	unsigned int log_unit )
1416 {
1417 	cmdlist_t *c;
1418 	int complete;
1419 	unsigned long temp;
1420 	unsigned long i;
1421 	ctlr_info_t *info_p = hba[ctlr];
1422 
1423 	c = cmd_alloc(info_p, 1);
1424 	if(!c)
1425 		return IO_ERROR;
1426 	c->ctlr = ctlr;
1427 	c->hdr.unit = log_unit;
1428 	c->hdr.prio = 0;
1429 	c->hdr.size = sizeof(rblk_t) >> 2;
1430 	c->size += sizeof(rblk_t);
1431 
1432 	/* The request information. */
1433 	c->req.hdr.next = 0;
1434 	c->req.hdr.rcode = 0;
1435 	c->req.bp = 0;
1436 	c->req.hdr.sg_cnt = 1;
1437 	c->req.hdr.reserved = 0;
1438 
1439 	if (size == 0)
1440 		c->req.sg[0].size = 512;
1441 	else
1442 		c->req.sg[0].size = size;
1443 
1444 	c->req.hdr.blk = blk;
1445 	c->req.hdr.blk_cnt = blkcnt;
1446 	c->req.hdr.cmd = (unsigned char) cmd;
1447 	c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
1448 		buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1449 	/*
1450 	 * Disable interrupt
1451 	 */
1452 	info_p->access.set_intr_mask(info_p, 0);
1453 	/* Make sure there is room in the command FIFO */
1454 	/* Actually it should be completely empty at this time. */
1455 	for (i = 200000; i > 0; i--) {
1456 		temp = info_p->access.fifo_full(info_p);
1457 		if (temp != 0) {
1458 			break;
1459 		}
1460 		udelay(10);
1461 DBG(
1462 		printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
1463 			" waiting!\n", ctlr);
1464 );
1465 	}
1466 	/*
1467 	 * Send the cmd
1468 	 */
1469 	info_p->access.submit_command(info_p, c);
1470 	complete = pollcomplete(ctlr);
1471 
1472 	pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
1473 		c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1474 	if (complete != 1) {
1475 		if (complete != c->busaddr) {
1476 			printk( KERN_WARNING
1477 			"cpqarray ida%d: idaSendPciCmd "
1478 		      "Invalid command list address returned! (%08lx)\n",
1479 				ctlr, (unsigned long)complete);
1480 			cmd_free(info_p, c, 1);
1481 			return (IO_ERROR);
1482 		}
1483 	} else {
1484 		printk( KERN_WARNING
1485 			"cpqarray ida%d: idaSendPciCmd timed out, "
1486 			"No command list address returned!\n",
1487 			ctlr);
1488 		cmd_free(info_p, c, 1);
1489 		return (IO_ERROR);
1490 	}
1491 
1492 	if (c->req.hdr.rcode & 0x00FE) {
1493 		if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
1494 			printk( KERN_WARNING
1495 			"cpqarray ida%d: idaSendPciCmd, error: "
1496 				"Controller failed at init time "
1497 				"cmd: 0x%x, return code = 0x%x\n",
1498 				ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
1499 
1500 			cmd_free(info_p, c, 1);
1501 			return (IO_ERROR);
1502 		}
1503 	}
1504 	cmd_free(info_p, c, 1);
1505 	return (IO_OK);
1506 }
1507 
1508 /*
1509  * revalidate_allvol is for online array config utilities.  After a
1510  * utility reconfigures the drives in the array, it can use this function
1511  * (through an ioctl) to make the driver zap any previous disk structs for
1512  * that controller and get new ones.
1513  *
1514  * Right now I'm using the getgeometry() function to do this, but this
1515  * function should probably be finer grained and allow you to revalidate one
1516  * particular logical volume (instead of all of them on a particular
1517  * controller).
1518  */
1519 static int revalidate_allvol(ctlr_info_t *host)
1520 {
1521 	int ctlr = host->ctlr;
1522 	int i;
1523 	unsigned long flags;
1524 
1525 	spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1526 	if (host->usage_count > 1) {
1527 		spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1528 		printk(KERN_WARNING "cpqarray: Device busy for volume"
1529 			" revalidation (usage=%d)\n", host->usage_count);
1530 		return -EBUSY;
1531 	}
1532 	host->usage_count++;
1533 	spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1534 
1535 	/*
1536 	 * Set the partition and block size structures for all volumes
1537 	 * on this controller to zero.  We will reread all of this data
1538 	 */
1539 	set_capacity(ida_gendisk[ctlr][0], 0);
1540 	for (i = 1; i < NWD; i++) {
1541 		struct gendisk *disk = ida_gendisk[ctlr][i];
1542 		if (disk->flags & GENHD_FL_UP)
1543 			del_gendisk(disk);
1544 	}
1545 	memset(host->drv, 0, sizeof(drv_info_t)*NWD);
1546 
1547 	/*
1548 	 * Tell the array controller not to give us any interrupts while
1549 	 * we check the new geometry.  Then turn interrupts back on when
1550 	 * we're done.
1551 	 */
1552 	host->access.set_intr_mask(host, 0);
1553 	getgeometry(ctlr);
1554 	host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
1555 
1556 	for(i=0; i<NWD; i++) {
1557 		struct gendisk *disk = ida_gendisk[ctlr][i];
1558 		drv_info_t *drv = &host->drv[i];
1559 		if (i && !drv->nr_blks)
1560 			continue;
1561 		blk_queue_logical_block_size(host->queue, drv->blk_size);
1562 		set_capacity(disk, drv->nr_blks);
1563 		disk->queue = host->queue;
1564 		disk->private_data = drv;
1565 		if (i)
1566 			add_disk(disk);
1567 	}
1568 
1569 	host->usage_count--;
1570 	return 0;
1571 }
1572 
1573 static int ida_revalidate(struct gendisk *disk)
1574 {
1575 	drv_info_t *drv = disk->private_data;
1576 	set_capacity(disk, drv->nr_blks);
1577 	return 0;
1578 }
1579 
1580 /********************************************************************
1581     name: pollcomplete
1582     Wait polling for a command to complete.
1583     The memory mapped FIFO is polled for the completion.
1584     Used only at init time, interrupts disabled.
1585  ********************************************************************/
1586 static int pollcomplete(int ctlr)
1587 {
1588 	int done;
1589 	int i;
1590 
1591 	/* Wait (up to 2 seconds) for a command to complete */
1592 
1593 	for (i = 200000; i > 0; i--) {
1594 		done = hba[ctlr]->access.command_completed(hba[ctlr]);
1595 		if (done == 0) {
1596 			udelay(10);	/* a short fixed delay */
1597 		} else
1598 			return (done);
1599 	}
1600 	/* Invalid address to tell caller we ran out of time */
1601 	return 1;
1602 }
1603 /*****************************************************************
1604     start_fwbk
1605     Starts the controller firmware's background processing.
1606     Currently only the Integrated Raid controller needs this done.
1607     If the PCI mem address registers are written to after this,
1608 	 data corruption may occur
1609 *****************************************************************/
1610 static void start_fwbk(int ctlr)
1611 {
1612 	id_ctlr_t *id_ctlr_buf;
1613 	int ret_code;
1614 
1615 	if(	(hba[ctlr]->board_id != 0x40400E11)
1616 		&& (hba[ctlr]->board_id != 0x40480E11) )
1617 
1618 	/* Not an Integrated Raid, so there is nothing for us to do */
1619 		return;
1620 	printk(KERN_DEBUG "cpqarray: Starting firmware's background"
1621 		" processing\n");
1622 	/* The command does not return any data, but sendcmd() still needs a
1623 	   buffer to map */
1624 	id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1625 	if(id_ctlr_buf==NULL)
1626 	{
1627 		printk(KERN_WARNING "cpqarray: Out of memory. "
1628 			"Unable to start background processing.\n");
1629 		return;
1630 	}
1631 	ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
1632 		id_ctlr_buf, 0, 0, 0, 0);
1633 	if(ret_code != IO_OK)
1634 		printk(KERN_WARNING "cpqarray: Unable to start"
1635 			" background processing\n");
1636 
1637 	kfree(id_ctlr_buf);
1638 }
1639 /*****************************************************************
1640     getgeometry
1641     Get ida logical volume geometry from the controller
1642     This is a large bit of code which once existed in two flavors,
1643     It is used only at init time.
1644 *****************************************************************/
1645 static void getgeometry(int ctlr)
1646 {
1647 	id_log_drv_t *id_ldrive;
1648 	id_ctlr_t *id_ctlr_buf;
1649 	sense_log_drv_stat_t *id_lstatus_buf;
1650 	config_t *sense_config_buf;
1651 	unsigned int log_unit, log_index;
1652 	int ret_code, size;
1653 	drv_info_t *drv;
1654 	ctlr_info_t *info_p = hba[ctlr];
1655 	int i;
1656 
1657 	info_p->log_drv_map = 0;
1658 
1659 	id_ldrive = kzalloc(sizeof(id_log_drv_t), GFP_KERNEL);
1660 	if (!id_ldrive)	{
1661 		printk( KERN_ERR "cpqarray:  out of memory.\n");
1662 		goto err_0;
1663 	}
1664 
1665 	id_ctlr_buf = kzalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1666 	if (!id_ctlr_buf) {
1667 		printk( KERN_ERR "cpqarray:  out of memory.\n");
1668 		goto err_1;
1669 	}
1670 
1671 	id_lstatus_buf = kzalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
1672 	if (!id_lstatus_buf) {
1673 		printk( KERN_ERR "cpqarray:  out of memory.\n");
1674 		goto err_2;
1675 	}
1676 
1677 	sense_config_buf = kzalloc(sizeof(config_t), GFP_KERNEL);
1678 	if (!sense_config_buf) {
1679 		printk( KERN_ERR "cpqarray:  out of memory.\n");
1680 		goto err_3;
1681 	}
1682 
1683 	info_p->phys_drives = 0;
1684 	info_p->log_drv_map = 0;
1685 	info_p->drv_assign_map = 0;
1686 	info_p->drv_spare_map = 0;
1687 	info_p->mp_failed_drv_map = 0;	/* only initialized here */
1688 	/* Get controllers info for this logical drive */
1689 	ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
1690 	if (ret_code == IO_ERROR) {
1691 		/*
1692 		 * If can't get controller info, set the logical drive map to 0,
1693 		 * so the idastubopen will fail on all logical drives
1694 		 * on the controller.
1695 		 */
1696 		printk(KERN_ERR "cpqarray: error sending ID controller\n");
1697                 goto err_4;
1698         }
1699 
1700 	info_p->log_drives = id_ctlr_buf->nr_drvs;
1701 	for(i=0;i<4;i++)
1702 		info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
1703 	info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
1704 
1705 	printk(" (%s)\n", info_p->product_name);
1706 	/*
1707 	 * Initialize logical drive map to zero
1708 	 */
1709 	log_index = 0;
1710 	/*
1711 	 * Get drive geometry for all logical drives
1712 	 */
1713 	if (id_ctlr_buf->nr_drvs > 16)
1714 		printk(KERN_WARNING "cpqarray ida%d:  This driver supports "
1715 			"16 logical drives per controller.  "
1716 			"Additional drives will not be "
1717 			"detected.\n", ctlr);
1718 
1719 	for (log_unit = 0;
1720 	     (log_index < id_ctlr_buf->nr_drvs)
1721 	     && (log_unit < NWD);
1722 	     log_unit++) {
1723 		size = sizeof(sense_log_drv_stat_t);
1724 
1725 		/*
1726 		   Send "Identify logical drive status" cmd
1727 		 */
1728 		ret_code = sendcmd(SENSE_LOG_DRV_STAT,
1729 			     ctlr, id_lstatus_buf, size, 0, 0, log_unit);
1730 		if (ret_code == IO_ERROR) {
1731 			/*
1732 			   If can't get logical drive status, set
1733 			   the logical drive map to 0, so the
1734 			   idastubopen will fail for all logical drives
1735 			   on the controller.
1736 			 */
1737 			info_p->log_drv_map = 0;
1738 			printk( KERN_WARNING
1739 			     "cpqarray ida%d: idaGetGeometry - Controller"
1740 				" failed to report status of logical drive %d\n"
1741 			 "Access to this controller has been disabled\n",
1742 				ctlr, log_unit);
1743                 	goto err_4;
1744 		}
1745 		/*
1746 		   Make sure the logical drive is configured
1747 		 */
1748 		if (id_lstatus_buf->status != LOG_NOT_CONF) {
1749 			ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
1750 			       sizeof(id_log_drv_t), 0, 0, log_unit);
1751 			/*
1752 			   If error, the bit for this
1753 			   logical drive won't be set and
1754 			   idastubopen will return error.
1755 			 */
1756 			if (ret_code != IO_ERROR) {
1757 				drv = &info_p->drv[log_unit];
1758 				drv->blk_size = id_ldrive->blk_size;
1759 				drv->nr_blks = id_ldrive->nr_blks;
1760 				drv->cylinders = id_ldrive->drv.cyl;
1761 				drv->heads = id_ldrive->drv.heads;
1762 				drv->sectors = id_ldrive->drv.sect_per_track;
1763 				info_p->log_drv_map |=	(1 << log_unit);
1764 
1765 	printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
1766 		ctlr, log_unit, drv->blk_size, drv->nr_blks);
1767 				ret_code = sendcmd(SENSE_CONFIG,
1768 						  ctlr, sense_config_buf,
1769 				 sizeof(config_t), 0, 0, log_unit);
1770 				if (ret_code == IO_ERROR) {
1771 					info_p->log_drv_map = 0;
1772                 			printk(KERN_ERR "cpqarray: error sending sense config\n");
1773                 			goto err_4;
1774 				}
1775 
1776 				info_p->phys_drives =
1777 				    sense_config_buf->ctlr_phys_drv;
1778 				info_p->drv_assign_map
1779 				    |= sense_config_buf->drv_asgn_map;
1780 				info_p->drv_assign_map
1781 				    |= sense_config_buf->spare_asgn_map;
1782 				info_p->drv_spare_map
1783 				    |= sense_config_buf->spare_asgn_map;
1784 			}	/* end of if no error on id_ldrive */
1785 			log_index = log_index + 1;
1786 		}		/* end of if logical drive configured */
1787 	}			/* end of for log_unit */
1788 
1789 	/* Free all the buffers and return */
1790 err_4:
1791 	kfree(sense_config_buf);
1792 err_3:
1793   	kfree(id_lstatus_buf);
1794 err_2:
1795 	kfree(id_ctlr_buf);
1796 err_1:
1797   	kfree(id_ldrive);
1798 err_0:
1799 	return;
1800 }
1801 
1802 static void __exit cpqarray_exit(void)
1803 {
1804 	int i;
1805 
1806 	pci_unregister_driver(&cpqarray_pci_driver);
1807 
1808 	/* Double check that all controller entries have been removed */
1809 	for(i=0; i<MAX_CTLR; i++) {
1810 		if (hba[i] != NULL) {
1811 			printk(KERN_WARNING "cpqarray: Removing EISA "
1812 					"controller %d\n", i);
1813 			cpqarray_remove_one_eisa(i);
1814 		}
1815 	}
1816 
1817 	remove_proc_entry("driver/cpqarray", NULL);
1818 }
1819 
1820 module_init(cpqarray_init)
1821 module_exit(cpqarray_exit)
1822