1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (c) 2012 - 2015 UNISYS CORPORATION
4  * All rights reserved.
5  */
6 
7 #include <linux/debugfs.h>
8 #include <linux/kthread.h>
9 #include <linux/idr.h>
10 #include <linux/module.h>
11 #include <linux/seq_file.h>
12 #include <linux/visorbus.h>
13 #include <scsi/scsi.h>
14 #include <scsi/scsi_host.h>
15 #include <scsi/scsi_cmnd.h>
16 #include <scsi/scsi_device.h>
17 
18 #include "iochannel.h"
19 
/* The Send and Receive Buffers of the IO Queue may both be full */

/* number of clean IOs required after errors before error_count resets */
#define IOS_ERROR_THRESHOLD  1000
/* size of the table of requests outstanding to the IO Service Partition */
#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
/* cap on a disk's accumulated error_count */
#define VISORHBA_ERROR_COUNT 30

/* root of this driver's debugfs tree */
static struct dentry *visorhba_debugfs_dir;

/* GUIDS for HBA channel type supported by this driver */
static struct visor_channeltype_descriptor visorhba_channel_types[] = {
	/* Note that the only channel type we expect to be reported by the
	 * bus driver is the VISOR_VHBA channel.
	 */
	{ VISOR_VHBA_CHANNEL_GUID, "sparvhba", sizeof(struct channel_header),
	  VISOR_VHBA_CHANNEL_VERSIONID },
	{}
};

MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
40 
/* Per-disk state; stored in scsi_device->hostdata by visorhba_slave_alloc() */
struct visordisk_info {
	struct scsi_device *sdev;
	u32 valid;
	/* count of clean IOs still needed before error_count is reset */
	atomic_t ios_threshold;
	/* errors seen on this disk; capped at VISORHBA_ERROR_COUNT */
	atomic_t error_count;
	struct visordisk_info *next;
};
48 
/* One slot in the table of requests outstanding to the Service Partition */
struct scsipending {
	/* message buffer exchanged with the IOVM */
	struct uiscmdrsp cmdrsp;
	/* The Data being tracked: the caller's object (e.g. scsi_cmnd), or
	 * &cmdrsp when the entry refers only to the cmdrsp itself
	 */
	void *sent;
	/* Type of pointer that is being stored */
	char cmdtype;
};
56 
/* Each scsi_host has a host_data area that contains this struct. */
struct visorhba_devdata {
	struct Scsi_Host *scsihost;
	struct visor_device *dev;
	struct list_head dev_info_list;
	/* Tracks the requests that have been forwarded to
	 * the IOVM and haven't returned yet
	 */
	struct scsipending pending[MAX_PENDING_REQUESTS];
	/* Start search for next pending free slot here */
	unsigned int nextinsert;
	/* lock to protect data in devdata */
	spinlock_t privlock;
	bool serverdown;
	bool serverchangingstate;
	/* counters exported through the debugfs "info" file */
	unsigned long long acquire_failed_cnt;
	unsigned long long interrupts_rcvd;
	unsigned long long interrupts_notme;
	unsigned long long interrupts_disabled;
	/* feature-flags register; dumped by info_debugfs_show() */
	u64 __iomem *flags_addr;
	atomic_t interrupt_rcvd;
	/* the response thread sleeps here between queue drains */
	wait_queue_head_t rsp_queue;
	struct visordisk_info head;
	/* largest scsi buffer length queued so far (debugfs statistic) */
	unsigned int max_buff_len;
	int devnum;
	/* response thread; stopped in visorhba_serverdown_complete() */
	struct task_struct *thread;
	/* poll interval for the response thread, in milliseconds */
	int thread_wait_ms;

	/*
	 * allows us to pass int handles back-and-forth between us and
	 * iovm, instead of raw pointers
	 */
	struct idr idr;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_info;
};
94 
/* Wrapper associating an open with its per-host device data */
struct visorhba_devices_open {
	struct visorhba_devdata *devdata;
};
98 
99 /*
100  * visor_thread_start - Starts a thread for the device
101  * @threadfn:   Function the thread starts
102  * @thrcontext: Context to pass to the thread, i.e. devdata
103  * @name:	String describing name of thread
104  *
105  * Starts a thread for the device.
106  *
107  * Return: The task_struct * denoting the thread on success,
108  *	   or NULL on failure
109  */
visor_thread_start(int (* threadfn)(void *),void * thrcontext,char * name)110 static struct task_struct *visor_thread_start(int (*threadfn)(void *),
111 					      void *thrcontext, char *name)
112 {
113 	struct task_struct *task;
114 
115 	task = kthread_run(threadfn, thrcontext, "%s", name);
116 	if (IS_ERR(task)) {
117 		pr_err("visorbus failed to start thread\n");
118 		return NULL;
119 	}
120 	return task;
121 }
122 
123 /*
124  * visor_thread_stop - Stops the thread if it is running
125  * @task: Description of process to stop
126  */
static void visor_thread_stop(struct task_struct *task)
{
	/* blocks until threadfn exits after observing kthread_should_stop() */
	kthread_stop(task);
}
131 
132 /*
133  * add_scsipending_entry - Save off io command that is pending in
134  *			   Service Partition
135  * @devdata: Pointer to devdata
136  * @cmdtype: Specifies the type of command pending
137  * @new:     The command to be saved
138  *
139  * Saves off the io command that is being handled by the Service
140  * Partition so that it can be handled when it completes. If new is
141  * NULL it is assumed the entry refers only to the cmdrsp.
142  *
143  * Return: Insert_location where entry was added on success,
144  *	   -EBUSY if it can't
145  */
add_scsipending_entry(struct visorhba_devdata * devdata,char cmdtype,void * new)146 static int add_scsipending_entry(struct visorhba_devdata *devdata,
147 				 char cmdtype, void *new)
148 {
149 	unsigned long flags;
150 	struct scsipending *entry;
151 	int insert_location;
152 
153 	spin_lock_irqsave(&devdata->privlock, flags);
154 	insert_location = devdata->nextinsert;
155 	while (devdata->pending[insert_location].sent) {
156 		insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
157 		if (insert_location == (int)devdata->nextinsert) {
158 			spin_unlock_irqrestore(&devdata->privlock, flags);
159 			return -EBUSY;
160 		}
161 	}
162 
163 	entry = &devdata->pending[insert_location];
164 	memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
165 	entry->cmdtype = cmdtype;
166 	if (new)
167 		entry->sent = new;
168 	/* wants to send cmdrsp */
169 	else
170 		entry->sent = &entry->cmdrsp;
171 	devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
172 	spin_unlock_irqrestore(&devdata->privlock, flags);
173 
174 	return insert_location;
175 }
176 
177 /*
178  * del_scsipending_ent - Removes an entry from the pending array
179  * @devdata: Device holding the pending array
180  * @del:     Entry to remove
181  *
182  * Removes the entry pointed at by del and returns it.
183  *
184  * Return: The scsipending entry pointed to on success, NULL on failure
185  */
del_scsipending_ent(struct visorhba_devdata * devdata,int del)186 static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del)
187 {
188 	unsigned long flags;
189 	void *sent;
190 
191 	if (del >= MAX_PENDING_REQUESTS)
192 		return NULL;
193 
194 	spin_lock_irqsave(&devdata->privlock, flags);
195 	sent = devdata->pending[del].sent;
196 	devdata->pending[del].cmdtype = 0;
197 	devdata->pending[del].sent = NULL;
198 	spin_unlock_irqrestore(&devdata->privlock, flags);
199 
200 	return sent;
201 }
202 
203 /*
204  * get_scsipending_cmdrsp - Return the cmdrsp stored in a pending entry
205  * @ddata: Device holding the pending array
206  * @ent:   Entry that stores the cmdrsp
207  *
208  * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
209  * if the "sent" field is not NULL.
210  *
211  * Return: A pointer to the cmdrsp, NULL on failure
212  */
get_scsipending_cmdrsp(struct visorhba_devdata * ddata,int ent)213 static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
214 						int ent)
215 {
216 	if (ddata->pending[ent].sent)
217 		return &ddata->pending[ent].cmdrsp;
218 
219 	return NULL;
220 }
221 
222 /*
223  * simple_idr_get - Associate a provided pointer with an int value
224  *		    1 <= value <= INT_MAX, and return this int value;
225  *		    the pointer value can be obtained later by passing
226  *		    this int value to idr_find()
227  * @idrtable: The data object maintaining the pointer<-->int mappings
228  * @p:	      The pointer value to be remembered
229  * @lock:     A spinlock used when exclusive access to idrtable is needed
230  *
231  * Return: The id number mapped to pointer 'p', 0 on failure
232  */
static unsigned int simple_idr_get(struct idr *idrtable, void *p,
				   spinlock_t *lock)
{
	unsigned long flags;
	int id;

	/* preload outside the lock so idr_alloc() can use GFP_NOWAIT */
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(lock, flags);
	id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
	spin_unlock_irqrestore(lock, flags);
	idr_preload_end();

	/* idr_alloc() returns a negative errno on failure, else an id >= 1,
	 * so 0 is safe to use as the failure sentinel
	 */
	return id < 0 ? 0 : (unsigned int)id;
}
250 
251 /*
252  * setup_scsitaskmgmt_handles - Stash the necessary handles so that the
253  *				completion processing logic for a taskmgmt
254  *				cmd will be able to find who to wake up
255  *				and where to stash the result
256  * @idrtable: The data object maintaining the pointer<-->int mappings
257  * @lock:     A spinlock used when exclusive access to idrtable is needed
258  * @cmdrsp:   Response from the IOVM
259  * @event:    The event handle to associate with an id
260  * @result:   The location to place the result of the event handle into
261  */
static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
				       struct uiscmdrsp *cmdrsp,
				       wait_queue_head_t *event, int *result)
{
	/* Map the waitqueue and result pointers to int handles that can
	 * round-trip through the IOVM; completion looks them up again
	 * with idr_find() to know whom to wake and where to stash the
	 * result (see complete_taskmgmt_command()). A handle of 0 means
	 * the mapping failed (see simple_idr_get()).
	 */
	cmdrsp->scsitaskmgmt.notify_handle =
		simple_idr_get(idrtable, event, lock);
	cmdrsp->scsitaskmgmt.notifyresult_handle =
		simple_idr_get(idrtable, result, lock);
}
273 
274 /*
275  * cleanup_scsitaskmgmt_handles - Forget handles created by
276  *				  setup_scsitaskmgmt_handles()
277  * @idrtable: The data object maintaining the pointer<-->int mappings
278  * @cmdrsp:   Response from the IOVM
279  */
static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
					 struct uiscmdrsp *cmdrsp)
{
	/* a handle of 0 means simple_idr_get() failed, so nothing to drop */
	if (cmdrsp->scsitaskmgmt.notify_handle)
		idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
	if (cmdrsp->scsitaskmgmt.notifyresult_handle)
		idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
}
288 
289 /*
 * forward_taskmgmt_command - Send taskmgmt command to the Service
291  *			      Partition
292  * @tasktype: Type of taskmgmt command
293  * @scsidev:  Scsidev that issued command
294  *
295  * Create a cmdrsp packet and send it to the Service Partition
296  * that will service this request.
297  *
298  * Return: Int representing whether command was queued successfully or not
299  */
static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
				    struct scsi_device *scsidev)
{
	struct uiscmdrsp *cmdrsp;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsidev->host->hostdata;
	/* 0xffff is the "no result yet" sentinel awaited below */
	int notifyresult = 0xffff;
	wait_queue_head_t notifyevent;
	int scsicmd_id;

	/* the IOVM cannot be reached while down or transitioning */
	if (devdata->serverdown || devdata->serverchangingstate)
		return FAILED;

	/* reserve a pending slot; its embedded cmdrsp is the message */
	scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
					   NULL);
	if (scsicmd_id < 0)
		return FAILED;

	cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);

	init_waitqueue_head(&notifyevent);

	/* issue TASK_MGMT_ABORT_TASK */
	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
	/* map local waitqueue/result to int handles the IOVM echoes back */
	setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
				   &notifyevent, &notifyresult);

	/* save destination */
	cmdrsp->scsitaskmgmt.tasktype = tasktype;
	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
	cmdrsp->scsitaskmgmt.handle = scsicmd_id;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: initiating type=%d taskmgmt command\n", tasktype);
	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		goto err_del_scsipending_ent;

	/* It can take the Service Partition up to 35 seconds to complete
	 * an IO in some cases, so wait 45 seconds and error out
	 */
	if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
				msecs_to_jiffies(45000)))
		goto err_del_scsipending_ent;

	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d success; result=0x%x\n",
		 tasktype, notifyresult);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return SUCCESS;

err_del_scsipending_ent:
	/* NOTE(review): on timeout the IOVM may still answer later; the idr
	 * handles are removed here so a late completion finds no waiter
	 * (complete_taskmgmt_command() logs and returns in that case)
	 */
	dev_dbg(&scsidev->sdev_gendev,
		"visorhba: taskmgmt type=%d not executed\n", tasktype);
	del_scsipending_ent(devdata, scsicmd_id);
	cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
	return FAILED;
}
361 
362 /*
363  * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
364  * @scsicmd: The scsicmd that needs aborted
365  *
366  * Return: SUCCESS if inserted, FAILED otherwise
367  */
visorhba_abort_handler(struct scsi_cmnd * scsicmd)368 static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
369 {
370 	/* issue TASK_MGMT_ABORT_TASK */
371 	struct scsi_device *scsidev;
372 	struct visordisk_info *vdisk;
373 	int rtn;
374 
375 	scsidev = scsicmd->device;
376 	vdisk = scsidev->hostdata;
377 	if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
378 		atomic_inc(&vdisk->error_count);
379 	else
380 		atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
381 	rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
382 	if (rtn == SUCCESS) {
383 		scsicmd->result = DID_ABORT << 16;
384 		scsicmd->scsi_done(scsicmd);
385 	}
386 	return rtn;
387 }
388 
389 /*
390  * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
391  * @scsicmd: The scsicmd that needs aborted
392  *
393  * Return: SUCCESS if inserted, FAILED otherwise
394  */
visorhba_device_reset_handler(struct scsi_cmnd * scsicmd)395 static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
396 {
397 	/* issue TASK_MGMT_LUN_RESET */
398 	struct scsi_device *scsidev;
399 	struct visordisk_info *vdisk;
400 	int rtn;
401 
402 	scsidev = scsicmd->device;
403 	vdisk = scsidev->hostdata;
404 	if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
405 		atomic_inc(&vdisk->error_count);
406 	else
407 		atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
408 	rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
409 	if (rtn == SUCCESS) {
410 		scsicmd->result = DID_RESET << 16;
411 		scsicmd->scsi_done(scsicmd);
412 	}
413 	return rtn;
414 }
415 
416 /*
417  * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
418  *				target on the bus
419  * @scsicmd: The scsicmd that needs aborted
420  *
421  * Return: SUCCESS if inserted, FAILED otherwise
422  */
visorhba_bus_reset_handler(struct scsi_cmnd * scsicmd)423 static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
424 {
425 	struct scsi_device *scsidev;
426 	struct visordisk_info *vdisk;
427 	int rtn;
428 
429 	scsidev = scsicmd->device;
430 	shost_for_each_device(scsidev, scsidev->host) {
431 		vdisk = scsidev->hostdata;
432 		if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
433 			atomic_inc(&vdisk->error_count);
434 		else
435 			atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
436 	}
437 	rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
438 	if (rtn == SUCCESS) {
439 		scsicmd->result = DID_RESET << 16;
440 		scsicmd->scsi_done(scsicmd);
441 	}
442 	return rtn;
443 }
444 
445 /*
446  * visorhba_host_reset_handler - Not supported
447  * @scsicmd: The scsicmd that needs to be aborted
448  *
449  * Return: Not supported, return SUCCESS
450  */
static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
{
	/* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
	/* host reset is not implemented; it is always reported as SUCCESS */
	return SUCCESS;
}
456 
457 /*
458  * visorhba_get_info - Get information about SCSI device
459  * @shp: Scsi host that is requesting information
460  *
461  * Return: String with visorhba information
462  */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* Return version string; no per-host details are included */
	return "visorhba";
}
468 
469 /*
470  * dma_data_dir_linux_to_spar - convert dma_data_direction value to
471  *				Unisys-specific equivalent
472  * @d: dma direction value to convert
473  *
474  * Returns the Unisys-specific dma direction value corresponding to @d
475  */
dma_data_dir_linux_to_spar(enum dma_data_direction d)476 static u32 dma_data_dir_linux_to_spar(enum dma_data_direction d)
477 {
478 	switch (d) {
479 	case DMA_BIDIRECTIONAL:
480 		return UIS_DMA_BIDIRECTIONAL;
481 	case DMA_TO_DEVICE:
482 		return UIS_DMA_TO_DEVICE;
483 	case DMA_FROM_DEVICE:
484 		return UIS_DMA_FROM_DEVICE;
485 	case DMA_NONE:
486 		return UIS_DMA_NONE;
487 	default:
488 		return UIS_DMA_NONE;
489 	}
490 }
491 
492 /*
493  * visorhba_queue_command_lck - Queues command to the Service Partition
494  * @scsicmd:		Command to be queued
 * @visorhba_cmnd_done: Done command to call when scsicmd is returned
496  *
497  * Queues to scsicmd to the ServicePartition after converting it to a
498  * uiscmdrsp structure.
499  *
500  * Return: 0 if successfully queued to the Service Partition, otherwise
501  *	   error code
502  */
static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
				      void (*visorhba_cmnd_done)
					   (struct scsi_cmnd *))
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	int insert_location;
	unsigned char *cdb = scsicmd->cmnd;
	struct Scsi_Host *scsihost = scsidev->host;
	unsigned int i;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsihost->hostdata;
	struct scatterlist *sg = NULL;
	struct scatterlist *sglist = NULL;

	/* IOVM is down or transitioning; ask the midlayer to retry later */
	if (devdata->serverdown || devdata->serverchangingstate)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
						(void *)scsicmd);
	if (insert_location < 0)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
	cmdrsp->cmdtype = CMD_SCSI_TYPE;
	/* save the pending insertion location. Deletion from pending
	 * will return the scsicmd pointer for completion
	 */
	cmdrsp->scsi.handle = insert_location;

	/* save done function that we have call when cmd is complete */
	scsicmd->scsi_done = visorhba_cmnd_done;
	/* save destination */
	cmdrsp->scsi.vdest.channel = scsidev->channel;
	cmdrsp->scsi.vdest.id = scsidev->id;
	cmdrsp->scsi.vdest.lun = scsidev->lun;
	/* save datadir */
	cmdrsp->scsi.data_dir =
		dma_data_dir_linux_to_spar(scsicmd->sc_data_direction);
	/* always copies MAX_CMND_SIZE bytes; assumes scsicmd->cmnd is at
	 * least that large -- TODO confirm against midlayer CDB sizing
	 */
	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);

	/* keep track of the max buffer length so far. */
	if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
		devdata->max_buff_len = cmdrsp->scsi.bufflen;

	/* the channel message has a fixed-size scatter-gather table */
	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
		goto err_del_scsipending_ent;

	/* convert buffer to phys information  */
	/* buffer is scatterlist - copy it out */
	sglist = scsi_sglist(scsicmd);

	for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
		cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
		cmdrsp->scsi.gpi_list[i].length = sg->length;
	}
	cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);

	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		/* queue must be full and we aren't going to wait */
		goto err_del_scsipending_ent;

	return 0;

err_del_scsipending_ent:
	del_scsipending_ent(devdata, insert_location);
	return SCSI_MLQUEUE_DEVICE_BUSY;
}
574 
/* use the midlayer's standard queuecommand wrapper when available */
#ifdef DEF_SCSI_QCMD
static DEF_SCSI_QCMD(visorhba_queue_command)
#else
#define visorhba_queue_command visorhba_queue_command_lck
#endif
580 
581 /*
582  * visorhba_slave_alloc - Called when new disk is discovered
583  * @scsidev: New disk
584  *
585  * Create a new visordisk_info structure and add it to our
586  * list of vdisks.
587  *
588  * Return: 0 on success, -ENOMEM on failure.
589  */
590 static int visorhba_slave_alloc(struct scsi_device *scsidev)
591 {
592 	/* this is called by the midlayer before scan for new devices --
593 	 * LLD can alloc any struct & do init if needed.
594 	 */
595 	struct visordisk_info *vdisk;
596 	struct visorhba_devdata *devdata;
597 	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
598 
599 	/* already allocated return success */
600 	if (scsidev->hostdata)
601 		return 0;
602 
603 	/* even though we errored, treat as success */
604 	devdata = (struct visorhba_devdata *)scsihost->hostdata;
605 	if (!devdata)
606 		return 0;
607 
608 	vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC);
609 	if (!vdisk)
610 		return -ENOMEM;
611 
612 	vdisk->sdev = scsidev;
613 	scsidev->hostdata = vdisk;
614 	return 0;
615 }
616 
617 /*
618  * visorhba_slave_destroy - Disk is going away, clean up resources.
619  * @scsidev: Scsi device to destroy
620  */
visorhba_slave_destroy(struct scsi_device * scsidev)621 static void visorhba_slave_destroy(struct scsi_device *scsidev)
622 {
623 	/* midlevel calls this after device has been quiesced and
624 	 * before it is to be deleted.
625 	 */
626 	struct visordisk_info *vdisk;
627 
628 	vdisk = scsidev->hostdata;
629 	scsidev->hostdata = NULL;
630 	kfree(vdisk);
631 }
632 
/* entry points handed to the scsi midlayer for each visorhba host */
static struct scsi_host_template visorhba_driver_template = {
	.name = "Unisys Visor HBA",
	.info = visorhba_get_info,
	.queuecommand = visorhba_queue_command,
	.eh_abort_handler = visorhba_abort_handler,
	.eh_device_reset_handler = visorhba_device_reset_handler,
	.eh_bus_reset_handler = visorhba_bus_reset_handler,
	.eh_host_reset_handler = visorhba_host_reset_handler,
	.shost_attrs = NULL,
/* max simultaneously outstanding commands accepted from the midlayer */
#define visorhba_MAX_CMNDS 128
	.can_queue = visorhba_MAX_CMNDS,
	.sg_tablesize = 64,
	.this_id = -1,
	.slave_alloc = visorhba_slave_alloc,
	.slave_destroy = visorhba_slave_destroy,
};
649 
650 /*
651  * info_debugfs_show - Debugfs interface to dump visorhba states
652  * @seq: The sequence file to write information to
653  * @v:   Unused, but needed for use with seq file single_open invocation
654  *
655  * Presents a file in the debugfs tree named: /visorhba/vbus<x>:dev<y>/info.
656  *
657  * Return: SUCCESS
658  */
static int info_debugfs_show(struct seq_file *seq, void *v)
{
	struct visorhba_devdata *devdata = seq->private;

	seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
	seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
	seq_printf(seq, "interrupts_disabled = %llu\n",
		   devdata->interrupts_disabled);
	seq_printf(seq, "interrupts_notme = %llu\n",
		   devdata->interrupts_notme);
	seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
	/* the feature-flags register is only dumped when it is mapped */
	if (devdata->flags_addr) {
		u64 phys_flags_addr =
			virt_to_phys((__force  void *)devdata->flags_addr);
		seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
			   phys_flags_addr);
		seq_printf(seq, "FeatureFlags = %llu\n",
			   (u64)readq(devdata->flags_addr));
	}
	seq_printf(seq, "acquire_failed_cnt = %llu\n",
		   devdata->acquire_failed_cnt);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(info_debugfs);
684 
685 /*
686  * complete_taskmgmt_command - Complete task management
687  * @idrtable: The data object maintaining the pointer<-->int mappings
688  * @cmdrsp:   Response from the IOVM
689  * @result:   The result of the task management command
690  *
691  * Service Partition returned the result of the task management
692  * command. Wake up anyone waiting for it.
693  */
complete_taskmgmt_command(struct idr * idrtable,struct uiscmdrsp * cmdrsp,int result)694 static void complete_taskmgmt_command(struct idr *idrtable,
695 				      struct uiscmdrsp *cmdrsp, int result)
696 {
697 	wait_queue_head_t *wq =
698 		idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
699 	int *scsi_result_ptr =
700 		idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
701 	if (unlikely(!(wq && scsi_result_ptr))) {
702 		pr_err("visorhba: no completion context; cmd will time out\n");
703 		return;
704 	}
705 
706 	/* copy the result of the taskmgmt and
707 	 * wake up the error handler that is waiting for this
708 	 */
709 	pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
710 	*scsi_result_ptr = result;
711 	wake_up_all(wq);
712 }
713 
714 /*
715  * visorhba_serverdown_complete - Called when we are done cleaning up
716  *				  from serverdown
717  * @devdata: Visorhba instance on which to complete serverdown
718  *
 * Called when we are done cleaning up from serverdown; stop processing
720  * queue, fail pending IOs.
721  */
static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
{
	int i;
	struct scsipending *pendingdel = NULL;
	struct scsi_cmnd *scsicmd = NULL;
	struct uiscmdrsp *cmdrsp;
	unsigned long flags;

	/* Stop using the IOVM response queue (queue should be drained
	 * by the end)
	 */
	visor_thread_stop(devdata->thread);

	/* Fail commands that weren't completed */
	spin_lock_irqsave(&devdata->privlock, flags);
	for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
		pendingdel = &devdata->pending[i];
		switch (pendingdel->cmdtype) {
		case CMD_SCSI_TYPE:
			/* fail the command back to the scsi midlayer;
			 * NOTE(review): scsi_done runs under privlock with
			 * IRQs off here -- confirm that is acceptable
			 */
			scsicmd = pendingdel->sent;
			scsicmd->result = DID_RESET << 16;
			if (scsicmd->scsi_done)
				scsicmd->scsi_done(scsicmd);
			break;
		case CMD_SCSITASKMGMT_TYPE:
			/* wake any waiter in forward_taskmgmt_command() */
			cmdrsp = pendingdel->sent;
			complete_taskmgmt_command(&devdata->idr, cmdrsp,
						  TASK_MGMT_FAILED);
			break;
		default:
			break;
		}
		/* mark the slot free again */
		pendingdel->cmdtype = 0;
		pendingdel->sent = NULL;
	}
	spin_unlock_irqrestore(&devdata->privlock, flags);

	/* only now is the device considered fully down */
	devdata->serverdown = true;
	devdata->serverchangingstate = false;
}
762 
763 /*
764  * visorhba_serverdown - Got notified that the IOVM is down
765  * @devdata: Visorhba that is being serviced by downed IOVM
766  *
767  * Something happened to the IOVM, return immediately and
768  * schedule cleanup work.
769  *
770  * Return: 0 on success, -EINVAL on failure
771  */
visorhba_serverdown(struct visorhba_devdata * devdata)772 static int visorhba_serverdown(struct visorhba_devdata *devdata)
773 {
774 	if (!devdata->serverdown && !devdata->serverchangingstate) {
775 		devdata->serverchangingstate = true;
776 		visorhba_serverdown_complete(devdata);
777 	} else if (devdata->serverchangingstate) {
778 		return -EINVAL;
779 	}
780 	return 0;
781 }
782 
783 /*
784  * do_scsi_linuxstat - Scsi command returned linuxstat
785  * @cmdrsp:  Response from IOVM
786  * @scsicmd: Command issued
787  *
788  * Don't log errors for disk-not-present inquiries.
789  */
do_scsi_linuxstat(struct uiscmdrsp * cmdrsp,struct scsi_cmnd * scsicmd)790 static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
791 			      struct scsi_cmnd *scsicmd)
792 {
793 	struct visordisk_info *vdisk;
794 	struct scsi_device *scsidev;
795 
796 	scsidev = scsicmd->device;
797 	memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
798 
799 	/* Do not log errors for disk-not-present inquiries */
800 	if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
801 	    (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
802 	    cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT)
803 		return;
804 	/* Okay see what our error_count is here.... */
805 	vdisk = scsidev->hostdata;
806 	if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
807 		atomic_inc(&vdisk->error_count);
808 		atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
809 	}
810 }
811 
/* Fill buf with a synthetic INQUIRY response describing a not-present
 * device; returns 0 on success, -EINVAL if buf is too small.
 */
static int set_no_disk_inquiry_result(unsigned char *buf, size_t len,
				      bool is_lun0)
{
	if (len < NO_DISK_INQUIRY_RESULT_LEN)
		return -EINVAL;

	memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
	buf[2] = SCSI_SPC2_VER;
	/* lun 0 reports a disk-capable-but-absent device so the midlayer
	 * can still issue REPORT LUNS through it
	 */
	buf[0] = is_lun0 ? DEV_DISK_CAPABLE_NOT_PRESENT : DEV_NOT_CAPABLE;
	if (is_lun0)
		buf[3] = DEV_HISUPPORT;
	buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
	/* strncpy zero-pads the remainder of the vendor/product area */
	strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
	return 0;
}
829 
830 /*
831  * do_scsi_nolinuxstat - Scsi command didn't have linuxstat
832  * @cmdrsp:  Response from IOVM
833  * @scsicmd: Command issued
834  *
835  * Handle response when no linuxstat was returned.
836  */
do_scsi_nolinuxstat(struct uiscmdrsp * cmdrsp,struct scsi_cmnd * scsicmd)837 static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
838 				struct scsi_cmnd *scsicmd)
839 {
840 	struct scsi_device *scsidev;
841 	unsigned char *buf;
842 	struct scatterlist *sg;
843 	unsigned int i;
844 	char *this_page;
845 	char *this_page_orig;
846 	int bufind = 0;
847 	struct visordisk_info *vdisk;
848 
849 	scsidev = scsicmd->device;
850 	if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
851 	    cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN) {
852 		if (cmdrsp->scsi.no_disk_result == 0)
853 			return;
854 
855 		buf = kzalloc(36, GFP_KERNEL);
856 		if (!buf)
857 			return;
858 
859 		/* Linux scsi code wants a device at Lun 0
860 		 * to issue report luns, but we don't want
861 		 * a disk there so we'll present a processor
862 		 * there.
863 		 */
864 		set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
865 					   scsidev->lun == 0);
866 
867 		if (scsi_sg_count(scsicmd) == 0) {
868 			memcpy(scsi_sglist(scsicmd), buf,
869 			       cmdrsp->scsi.bufflen);
870 			kfree(buf);
871 			return;
872 		}
873 
874 		scsi_for_each_sg(scsicmd, sg, scsi_sg_count(scsicmd), i) {
875 			this_page_orig = kmap_atomic(sg_page(sg));
876 			this_page = (void *)((unsigned long)this_page_orig |
877 					     sg->offset);
878 			memcpy(this_page, buf + bufind, sg->length);
879 			kunmap_atomic(this_page_orig);
880 		}
881 		kfree(buf);
882 	} else {
883 		vdisk = scsidev->hostdata;
884 		if (atomic_read(&vdisk->ios_threshold) > 0) {
885 			atomic_dec(&vdisk->ios_threshold);
886 			if (atomic_read(&vdisk->ios_threshold) == 0)
887 				atomic_set(&vdisk->error_count, 0);
888 		}
889 	}
890 }
891 
892 /*
893  * complete_scsi_command - Complete a scsi command
894  * @uiscmdrsp: Response from Service Partition
895  * @scsicmd:   The scsi command
896  *
897  * Response was returned by the Service Partition. Finish it and send
898  * completion to the scsi midlayer.
899  */
complete_scsi_command(struct uiscmdrsp * cmdrsp,struct scsi_cmnd * scsicmd)900 static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
901 				  struct scsi_cmnd *scsicmd)
902 {
903 	/* take what we need out of cmdrsp and complete the scsicmd */
904 	scsicmd->result = cmdrsp->scsi.linuxstat;
905 	if (cmdrsp->scsi.linuxstat)
906 		do_scsi_linuxstat(cmdrsp, scsicmd);
907 	else
908 		do_scsi_nolinuxstat(cmdrsp, scsicmd);
909 
910 	scsicmd->scsi_done(scsicmd);
911 }
912 
913 /*
914  * drain_queue - Pull responses out of iochannel
915  * @cmdrsp:  Response from the IOSP
916  * @devdata: Device that owns this iochannel
917  *
918  * Pulls responses out of the iochannel and process the responses.
919  */
static void drain_queue(struct uiscmdrsp *cmdrsp,
			struct visorhba_devdata *devdata)
{
	struct scsi_cmnd *scsicmd;

	while (1) {
		/* queue empty */
		if (visorchannel_signalremove(devdata->dev->visorchannel,
					      IOCHAN_FROM_IOPART,
					      cmdrsp))
			break;
		if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
			/* scsicmd location is returned by the
			 * deletion
			 */
			scsicmd = del_scsipending_ent(devdata,
						      cmdrsp->scsi.handle);
			/* NOTE(review): a lookup miss (e.g. stale handle)
			 * stops draining; remaining responses wait for the
			 * next wakeup of process_incoming_rsps() -- confirm
			 * this is intentional
			 */
			if (!scsicmd)
				break;
			/* complete the orig cmd */
			complete_scsi_command(cmdrsp, scsicmd);
		} else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
			if (!del_scsipending_ent(devdata,
						 cmdrsp->scsitaskmgmt.handle))
				break;
			complete_taskmgmt_command(&devdata->idr, cmdrsp,
						  cmdrsp->scsitaskmgmt.result);
		} else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
			dev_err_once(&devdata->dev->device,
				     "ignoring unsupported NOTIFYGUEST\n");
		/* cmdrsp is now available for re-use */
	}
}
953 
954 /*
955  * process_incoming_rsps - Process responses from IOSP
956  * @v:  Void pointer to visorhba_devdata
957  *
958  * Main function for the thread that processes the responses
959  * from the IO Service Partition. When the queue is empty, wait
960  * to check to see if it is full again.
961  *
962  * Return: 0 on success, -ENOMEM on failure
963  */
process_incoming_rsps(void * v)964 static int process_incoming_rsps(void *v)
965 {
966 	struct visorhba_devdata *devdata = v;
967 	struct uiscmdrsp *cmdrsp = NULL;
968 	const int size = sizeof(*cmdrsp);
969 
970 	cmdrsp = kmalloc(size, GFP_ATOMIC);
971 	if (!cmdrsp)
972 		return -ENOMEM;
973 
974 	while (1) {
975 		if (kthread_should_stop())
976 			break;
977 		wait_event_interruptible_timeout(
978 			devdata->rsp_queue, (atomic_read(
979 					     &devdata->interrupt_rcvd) == 1),
980 				msecs_to_jiffies(devdata->thread_wait_ms));
981 		/* drain queue */
982 		drain_queue(cmdrsp, devdata);
983 	}
984 	kfree(cmdrsp);
985 	return 0;
986 }
987 
988 /*
989  * visorhba_pause - Function to handle visorbus pause messages
990  * @dev:	   Device that is pausing
991  * @complete_func: Function to call when finished
992  *
993  * Something has happened to the IO Service Partition that is
994  * handling this device. Quiet this device and reset commands
995  * so that the Service Partition can be corrected.
996  *
997  * Return: SUCCESS
998  */
visorhba_pause(struct visor_device * dev,visorbus_state_complete_func complete_func)999 static int visorhba_pause(struct visor_device *dev,
1000 			  visorbus_state_complete_func complete_func)
1001 {
1002 	struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1003 
1004 	visorhba_serverdown(devdata);
1005 	complete_func(dev, 0);
1006 	return 0;
1007 }
1008 
1009 /*
1010  * visorhba_resume - Function called when the IO Service Partition is back
1011  * @dev:	   Device that is pausing
1012  * @complete_func: Function to call when finished
1013  *
1014  * Yay! The IO Service Partition is back, the channel has been wiped
1015  * so lets re-establish connection and start processing responses.
1016  *
1017  * Return: 0 on success, -EINVAL on failure
1018  */
visorhba_resume(struct visor_device * dev,visorbus_state_complete_func complete_func)1019 static int visorhba_resume(struct visor_device *dev,
1020 			   visorbus_state_complete_func complete_func)
1021 {
1022 	struct visorhba_devdata *devdata;
1023 
1024 	devdata = dev_get_drvdata(&dev->device);
1025 	if (!devdata)
1026 		return -EINVAL;
1027 
1028 	if (devdata->serverdown && !devdata->serverchangingstate)
1029 		devdata->serverchangingstate = true;
1030 
1031 	devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1032 					     "vhba_incming");
1033 	devdata->serverdown = false;
1034 	devdata->serverchangingstate = false;
1035 
1036 	return 0;
1037 }
1038 
1039 /*
1040  * visorhba_probe - Device has been discovered; do acquire
1041  * @dev: visor_device that was discovered
1042  *
1043  * A new HBA was discovered; do the initial connections of it.
1044  *
1045  * Return: 0 on success, otherwise error code
1046  */
visorhba_probe(struct visor_device * dev)1047 static int visorhba_probe(struct visor_device *dev)
1048 {
1049 	struct Scsi_Host *scsihost;
1050 	struct vhba_config_max max;
1051 	struct visorhba_devdata *devdata = NULL;
1052 	int err, channel_offset;
1053 	u64 features;
1054 
1055 	scsihost = scsi_host_alloc(&visorhba_driver_template,
1056 				   sizeof(*devdata));
1057 	if (!scsihost)
1058 		return -ENODEV;
1059 
1060 	channel_offset = offsetof(struct visor_io_channel, vhba.max);
1061 	err = visorbus_read_channel(dev, channel_offset, &max,
1062 				    sizeof(struct vhba_config_max));
1063 	if (err < 0)
1064 		goto err_scsi_host_put;
1065 
1066 	scsihost->max_id = (unsigned int)max.max_id;
1067 	scsihost->max_lun = (unsigned int)max.max_lun;
1068 	scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
1069 	scsihost->max_sectors =
1070 	    (unsigned short)(max.max_io_size >> 9);
1071 	scsihost->sg_tablesize =
1072 	    (unsigned short)(max.max_io_size / PAGE_SIZE);
1073 	if (scsihost->sg_tablesize > MAX_PHYS_INFO)
1074 		scsihost->sg_tablesize = MAX_PHYS_INFO;
1075 	err = scsi_add_host(scsihost, &dev->device);
1076 	if (err < 0)
1077 		goto err_scsi_host_put;
1078 
1079 	devdata = (struct visorhba_devdata *)scsihost->hostdata;
1080 	devdata->dev = dev;
1081 	dev_set_drvdata(&dev->device, devdata);
1082 
1083 	devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
1084 						  visorhba_debugfs_dir);
1085 	if (!devdata->debugfs_dir) {
1086 		err = -ENOMEM;
1087 		goto err_scsi_remove_host;
1088 	}
1089 	devdata->debugfs_info =
1090 		debugfs_create_file("info", 0440,
1091 				    devdata->debugfs_dir, devdata,
1092 				    &info_debugfs_fops);
1093 	if (!devdata->debugfs_info) {
1094 		err = -ENOMEM;
1095 		goto err_debugfs_dir;
1096 	}
1097 
1098 	init_waitqueue_head(&devdata->rsp_queue);
1099 	spin_lock_init(&devdata->privlock);
1100 	devdata->serverdown = false;
1101 	devdata->serverchangingstate = false;
1102 	devdata->scsihost = scsihost;
1103 
1104 	channel_offset = offsetof(struct visor_io_channel,
1105 				  channel_header.features);
1106 	err = visorbus_read_channel(dev, channel_offset, &features, 8);
1107 	if (err)
1108 		goto err_debugfs_info;
1109 	features |= VISOR_CHANNEL_IS_POLLING;
1110 	err = visorbus_write_channel(dev, channel_offset, &features, 8);
1111 	if (err)
1112 		goto err_debugfs_info;
1113 
1114 	idr_init(&devdata->idr);
1115 
1116 	devdata->thread_wait_ms = 2;
1117 	devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
1118 					     "vhba_incoming");
1119 
1120 	scsi_scan_host(scsihost);
1121 
1122 	return 0;
1123 
1124 err_debugfs_info:
1125 	debugfs_remove(devdata->debugfs_info);
1126 
1127 err_debugfs_dir:
1128 	debugfs_remove_recursive(devdata->debugfs_dir);
1129 
1130 err_scsi_remove_host:
1131 	scsi_remove_host(scsihost);
1132 
1133 err_scsi_host_put:
1134 	scsi_host_put(scsihost);
1135 	return err;
1136 }
1137 
1138 /*
1139  * visorhba_remove - Remove a visorhba device
1140  * @dev: Device to remove
1141  *
1142  * Removes the visorhba device.
1143  */
visorhba_remove(struct visor_device * dev)1144 static void visorhba_remove(struct visor_device *dev)
1145 {
1146 	struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1147 	struct Scsi_Host *scsihost = NULL;
1148 
1149 	if (!devdata)
1150 		return;
1151 
1152 	scsihost = devdata->scsihost;
1153 	visor_thread_stop(devdata->thread);
1154 	scsi_remove_host(scsihost);
1155 	scsi_host_put(scsihost);
1156 
1157 	idr_destroy(&devdata->idr);
1158 
1159 	dev_set_drvdata(&dev->device, NULL);
1160 	debugfs_remove(devdata->debugfs_info);
1161 	debugfs_remove_recursive(devdata->debugfs_dir);
1162 }
1163 
/* This is used to tell the visorbus driver which types of visor devices
 * we support, and what functions to call when a visor device that we support
 * is attached or removed.
 */
static struct visor_driver visorhba_driver = {
	.name = "visorhba",
	.owner = THIS_MODULE,
	/* Only the VISOR_VHBA channel type is claimed (see table above). */
	.channel_types = visorhba_channel_types,
	.probe = visorhba_probe,
	.remove = visorhba_remove,
	.pause = visorhba_pause,
	.resume = visorhba_resume,
	/* No interrupt callback: the channel is polled (see probe). */
	.channel_interrupt = NULL,
};
1178 
1179 /*
1180  * visorhba_init - Driver init routine
1181  *
1182  * Initialize the visorhba driver and register it with visorbus
1183  * to handle s-Par virtual host bus adapter.
1184  *
1185  * Return: 0 on success, error code otherwise
1186  */
visorhba_init(void)1187 static int visorhba_init(void)
1188 {
1189 	int rc;
1190 
1191 	visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1192 	if (!visorhba_debugfs_dir)
1193 		return -ENOMEM;
1194 
1195 	rc = visorbus_register_visor_driver(&visorhba_driver);
1196 	if (rc)
1197 		goto cleanup_debugfs;
1198 
1199 	return 0;
1200 
1201 cleanup_debugfs:
1202 	debugfs_remove_recursive(visorhba_debugfs_dir);
1203 
1204 	return rc;
1205 }
1206 
1207 /*
1208  * visorhba_exit - Driver exit routine
1209  *
1210  * Unregister driver from the bus and free up memory.
1211  */
visorhba_exit(void)1212 static void visorhba_exit(void)
1213 {
1214 	visorbus_unregister_visor_driver(&visorhba_driver);
1215 	debugfs_remove_recursive(visorhba_debugfs_dir);
1216 }
1217 
/* Module entry/exit points and metadata. */
module_init(visorhba_init);
module_exit(visorhba_exit);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");
1224