/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include "mei_dev.h"
#include "mei.h"
#include "interface.h"
#include "mei_version.h"


#define MEI_READ_TIMEOUT 45
#define MEI_DRIVER_NAME	"mei"
#define MEI_DEV_NAME "mei"

/*
 *  mei driver strings
 */
static char mei_driver_name[] = MEI_DRIVER_NAME;
static const char mei_driver_string[] = "Intel(R) Management Engine Interface";
static const char mei_driver_version[] = MEI_DRIVER_VERSION;

/* The device pointer */
/* Currently this driver works as long as there is only a single AMT device. */
struct pci_dev *mei_device;

/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

static DEFINE_MUTEX(mei_mutex);

/**
 * mei_clear_list - removes all callbacks associated with file
 *		from mei_cb_list
 *
 * @dev: device structure.
 * @file: file structure
 * @mei_cb_list: callbacks list
 *
 * mei_clear_list is called to clear resources associated with a file
 * when the application calls the close function or is interrupted (Ctrl-C)
 *
 * returns true if a callback was removed from the list, false otherwise
 */
static bool mei_clear_list(struct mei_device *dev,
		struct file *file, struct list_head *mei_cb_list)
{
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb_next = NULL;
	struct file *file_temp;
	bool removed = false;

	/* iterate over all list members */
	list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, cb_list) {
		file_temp = (struct file *)cb_pos->file_object;
		/* check whether this list member is associated with the file */
		if (file_temp == file) {
			/* remove member from the list */
			list_del(&cb_pos->cb_list);
			/* check if cb equals the current iamthif cb */
			if (dev->iamthif_current_cb == cb_pos) {
				dev->iamthif_current_cb = NULL;
				/* send flow control to the iamthif client */
				mei_send_flow_control(dev, &dev->iamthif_cl);
			}
			/* free all allocated buffers */
			mei_free_cb_private(cb_pos);
			cb_pos = NULL;
			removed = true;
		}
	}
	return removed;
}

/**
 * mei_clear_lists - removes all callbacks associated with file
 *
 * @dev: device structure
 * @file: file structure
 *
 * mei_clear_lists is called to clear resources associated with a file
 * when the application calls the close function or is interrupted (Ctrl-C)
 *
 * returns true if a callback was removed from a list, false otherwise
 */
static bool mei_clear_lists(struct mei_device *dev, struct file *file)
{
	bool removed = false;

	/* remove callbacks associated with a file */
	mei_clear_list(dev, file, &dev->amthi_cmd_list.mei_cb.cb_list);
	if (mei_clear_list(dev, file,
			    &dev->amthi_read_complete_list.mei_cb.cb_list))
		removed = true;

	mei_clear_list(dev, file, &dev->ctrl_rd_list.mei_cb.cb_list);

	if (mei_clear_list(dev, file, &dev->ctrl_wr_list.mei_cb.cb_list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_waiting_list.mei_cb.cb_list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_list.mei_cb.cb_list))
		removed = true;

	/* check if iamthif_current_cb is not NULL */
	if (dev->iamthif_current_cb && !removed) {
		/* check file and iamthif current cb association */
		if (dev->iamthif_current_cb->file_object == file) {
			/* remove cb */
			mei_free_cb_private(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;
			removed = true;
		}
	}
	return removed;
}
/**
 * find_read_list_entry - find read list entry
 *
 * @dev: device structure
 * @cl: host client structure
 *
 * returns cb on success, NULL on error
 */
static struct mei_cl_cb *find_read_list_entry(
		struct mei_device *dev,
		struct mei_cl *cl)
{
	struct mei_cl_cb *pos = NULL;
	struct mei_cl_cb *next = NULL;

	dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
	list_for_each_entry_safe(pos, next,
			&dev->read_list.mei_cb.cb_list, cb_list) {
		struct mei_cl *cl_temp;
		cl_temp = (struct mei_cl *)pos->file_private;

		if (mei_cl_cmp_id(cl, cl_temp))
			return pos;
	}
	return NULL;
}

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_cl *cl;
	struct mei_device *dev;
	unsigned long cl_id;
	int err;

	err = -ENODEV;
	if (!mei_device)
		goto out;

	dev = pci_get_drvdata(mei_device);
	if (!dev)
		goto out;

	mutex_lock(&dev->device_lock);
	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto out_unlock;

	err = -ENODEV;
	if (dev->mei_state != MEI_ENABLED) {
		dev_dbg(&dev->pdev->dev, "mei_state != MEI_ENABLED  mei_state= %d\n",
		    dev->mei_state);
		goto out_unlock;
	}
	err = -EMFILE;
	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT)
		goto out_unlock;

	cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (cl_id >= MEI_CLIENTS_MAX)
		goto out_unlock;

	cl->host_client_id = cl_id;

	dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

	dev->open_handle_count++;

	list_add_tail(&cl->link, &dev->file_list);

	set_bit(cl->host_client_id, dev->host_clients_map);
	cl->state = MEI_FILE_INITIALIZING;
	cl->sm_state = 0;

	file->private_data = cl;
	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

out_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
out:
	return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl != &dev->iamthif_cl) {
		if (cl->state == MEI_FILE_CONNECTED) {
			cl->state = MEI_FILE_DISCONNECTING;
			dev_dbg(&dev->pdev->dev,
				"disconnecting client host client = %d, "
			    "ME client = %d\n",
			    cl->host_client_id,
			    cl->me_client_id);
			rets = mei_disconnect_host_client(dev, cl);
		}
		mei_cl_flush_queues(cl);
		dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
		    cl->host_client_id,
		    cl->me_client_id);

		if (dev->open_handle_count > 0) {
			clear_bit(cl->host_client_id, dev->host_clients_map);
			dev->open_handle_count--;
		}
		mei_remove_client_from_file_list(dev, cl->host_client_id);

		/* free read cb */
		cb = NULL;
		if (cl->read_cb) {
			cb = find_read_list_entry(dev, cl);
			/* Remove entry from read list */
			if (cb)
				list_del(&cb->cb_list);

			cb = cl->read_cb;
			cl->read_cb = NULL;
		}

		file->private_data = NULL;

		if (cb) {
			mei_free_cb_private(cb);
			cb = NULL;
		}

		kfree(cl);
	} else {
		if (dev->open_handle_count > 0)
			dev->open_handle_count--;

		if (dev->iamthif_file_object == file &&
		    dev->iamthif_state != MEI_IAMTHIF_IDLE) {

			dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
			    dev->iamthif_state);
			dev->iamthif_canceled = true;
			if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
				dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
				mei_run_next_iamthif_cmd(dev);
			}
		}

		if (mei_clear_lists(dev, file))
			dev->iamthif_state = MEI_IAMTHIF_IDLE;

	}
	mutex_unlock(&dev->device_lock);
	return rets;
}


/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int i;
	int rets;
	int err;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->mei_state != MEI_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
		/* Do not allow reading from the watchdog client */
		i = mei_find_me_client_index(dev, mei_wd_guid);
		if (i >= 0) {
			struct mei_me_client *me_client = &dev->me_clients[i];

			if (cl->me_client_id == me_client->client_id) {
				rets = -EBADF;
				goto out;
			}
		}
	} else {
		cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
	}

	if (cl == &dev->iamthif_cl) {
		rets = amthi_read(dev, file, ubuf, length, offset);
		goto out;
	}

	if (cl->read_cb && cl->read_cb->information > *offset) {
		cb = cl->read_cb;
		goto copy_buffer;
	} else if (cl->read_cb && cl->read_cb->information > 0 &&
		   cl->read_cb->information <= *offset) {
		cb = cl->read_cb;
		rets = 0;
		goto free;
	} else if ((!cl->read_cb || !cl->read_cb->information) &&
		    *offset > 0) {
		/* Offset needs to be cleared for contiguous reads */
		*offset = 0;
		rets = 0;
		goto out;
	}

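	/*
	 * No buffered data for this client yet: issue a new read request.
	 * -EBUSY is tolerated below; it presumably indicates that a read
	 * request is already outstanding, in which case we simply wait for
	 * it to complete.
	 */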
	err = mei_start_read(dev, cl);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

	if (MEI_READ_COMPLETE != cl->reading_state &&
			!waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}

		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
			(MEI_READ_COMPLETE == cl->reading_state ||
			 MEI_FILE_INITIALIZING == cl->state ||
			 MEI_FILE_DISCONNECTED == cl->state ||
			 MEI_FILE_DISCONNECTING == cl->state))) {
			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (MEI_FILE_INITIALIZING == cl->state ||
		    MEI_FILE_DISCONNECTED == cl->state ||
		    MEI_FILE_DISCONNECTING == cl->state) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
	    cb->response_buffer.size);
	dev_dbg(&dev->pdev->dev, "cb->information - %lu\n",
	    cb->information);
	if (length == 0 || ubuf == NULL || *offset > cb->information) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE, however, */
	/* information size may be longer */
	length = min_t(size_t, length, (cb->information - *offset));

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->information)
		goto out;

free:
	cb_pos = find_read_list_entry(dev, cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->cb_list);
	mei_free_cb_private(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
	cl->read_pending = 0;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_msg_hdr mei_hdr;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->mei_state != MEI_ENABLED) {
		mutex_unlock(&dev->device_lock);
		return -ENODEV;
	}

	if (cl == &dev->iamthif_cl) {
		write_cb = find_amthi_read_list_entry(dev, file);

		if (write_cb) {
			timeout = write_cb->read_time +
					msecs_to_jiffies(IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
				 cl->reading_state == MEI_READ_COMPLETE) {
					*offset = 0;
					list_del(&write_cb->cb_list);
					mei_free_cb_private(write_cb);
					write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = find_read_list_entry(dev, cl);
		if (write_cb) {
			list_del(&write_cb->cb_list);
			mei_free_cb_private(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
			cl->read_pending = 0;
		}
	} else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
		*offset = 0;


	write_cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!write_cb) {
		mutex_unlock(&dev->device_lock);
		return -ENOMEM;
	}

	write_cb->file_object = file;
	write_cb->file_private = cl;
	write_cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
	rets = -ENOMEM;
	if (!write_cb->request_buffer.data)
		goto unlock_dev;

	dev_dbg(&dev->pdev->dev, "length =%d\n", (int) length);

	rets = -EFAULT;
	if (copy_from_user(write_cb->request_buffer.data, ubuf, length))
		goto unlock_dev;

	cl->sm_state = 0;
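	/*
	 * A 4-byte payload matching one of the watchdog state-independence
	 * messages marks this client, so that a subsequent read from the
	 * watchdog ME client is permitted (see the check in mei_read()).
	 */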
	if (length == 4 &&
	    ((memcmp(mei_wd_state_independence_msg[0],
				 write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[1],
				 write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[2],
				 write_cb->request_buffer.data, 4) == 0)))
		cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

	INIT_LIST_HEAD(&write_cb->cb_list);
	if (cl == &dev->iamthif_cl) {
		write_cb->response_buffer.data =
		    kmalloc(dev->iamthif_mtu, GFP_KERNEL);
		if (!write_cb->response_buffer.data) {
			rets = -ENOMEM;
			goto unlock_dev;
		}
		if (dev->mei_state != MEI_ENABLED) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		for (i = 0; i < dev->me_clients_num; i++) {
			if (dev->me_clients[i].client_id ==
				dev->iamthif_cl.me_client_id)
				break;
		}

		/* check the index before dereferencing me_clients[i] */
		if (i == dev->me_clients_num ||
		    dev->me_clients[i].client_id !=
		      dev->iamthif_cl.me_client_id) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		if (WARN_ON(dev->me_clients[i].client_id != cl->me_client_id)) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		if (length > dev->me_clients[i].props.max_msg_length ||
		    length <= 0) {
			rets = -EMSGSIZE;
			goto unlock_dev;
		}

		write_cb->response_buffer.size = dev->iamthif_mtu;
		write_cb->major_file_operations = MEI_IOCTL;
		write_cb->information = 0;
		write_cb->request_buffer.size = length;
		if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
			rets = -ENODEV;
			goto unlock_dev;
		}

		if (!list_empty(&dev->amthi_cmd_list.mei_cb.cb_list) ||
				dev->iamthif_state != MEI_IAMTHIF_IDLE) {
			dev_dbg(&dev->pdev->dev, "amthi_state = %d\n",
					(int) dev->iamthif_state);
			dev_dbg(&dev->pdev->dev, "add amthi cb to amthi cmd waiting list\n");
			list_add_tail(&write_cb->cb_list,
					&dev->amthi_cmd_list.mei_cb.cb_list);
			rets = length;
		} else {
			dev_dbg(&dev->pdev->dev, "call amthi write\n");
			rets = amthi_write(dev, write_cb);

			if (rets) {
				dev_dbg(&dev->pdev->dev, "amthi write failed with status = %d\n",
				    rets);
				goto unlock_dev;
			}
			rets = length;
		}
		mutex_unlock(&dev->device_lock);
		return rets;
	}

	write_cb->major_file_operations = MEI_WRITE;
	/* make sure information is zero before we start */

	write_cb->information = 0;
	write_cb->request_buffer.size = length;

	dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
	    cl->host_client_id, cl->me_client_id);
	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -ENODEV;
		dev_dbg(&dev->pdev->dev, "host client = %d,  is not connected to ME client = %d",
		    cl->host_client_id,
		    cl->me_client_id);
		goto unlock_dev;
	}
	for (i = 0; i < dev->me_clients_num; i++) {
		if (dev->me_clients[i].client_id ==
		    cl->me_client_id)
			break;
	}
	/* check the index before dereferencing me_clients[i] */
	if (i == dev->me_clients_num) {
		rets = -ENODEV;
		goto unlock_dev;
	}
	if (WARN_ON(dev->me_clients[i].client_id != cl->me_client_id)) {
		rets = -ENODEV;
		goto unlock_dev;
	}
	if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
		rets = -EINVAL;
		goto unlock_dev;
	}
	write_cb->file_private = cl;

	rets = mei_flow_ctrl_creds(dev, cl);
	if (rets < 0)
		goto unlock_dev;

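	/*
	 * If flow-control credits are available and the host circular
	 * buffer is empty, write the first fragment right away.  A single
	 * hardware message cannot exceed the buffer depth (H_CBD, counted
	 * in 32-bit slots) minus the message header, so a longer request
	 * is split: the first chunk goes out now with msg_complete = 0 and
	 * the callback is queued on write_list for the interrupt path to
	 * continue.  Otherwise the whole callback is simply queued.
	 */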
	if (rets && dev->mei_host_buffer_is_empty) {
		rets = 0;
		dev->mei_host_buffer_is_empty = false;
		if (length > ((((dev->host_hw_state & H_CBD) >> 24) *
			sizeof(u32)) - sizeof(struct mei_msg_hdr))) {

			mei_hdr.length =
				(((dev->host_hw_state & H_CBD) >> 24) *
				sizeof(u32)) -
				sizeof(struct mei_msg_hdr);
			mei_hdr.msg_complete = 0;
		} else {
			mei_hdr.length = length;
			mei_hdr.msg_complete = 1;
		}
		mei_hdr.host_addr = cl->host_client_id;
		mei_hdr.me_addr = cl->me_client_id;
		mei_hdr.reserved = 0;
		dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
		    *((u32 *) &mei_hdr));
		if (!mei_write_message(dev, &mei_hdr,
			(unsigned char *) (write_cb->request_buffer.data),
			mei_hdr.length)) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		cl->writing_state = MEI_WRITING;
		write_cb->information = mei_hdr.length;
		if (mei_hdr.msg_complete) {
			if (mei_flow_ctrl_reduce(dev, cl)) {
				rets = -ENODEV;
				goto unlock_dev;
			}
			list_add_tail(&write_cb->cb_list,
				      &dev->write_waiting_list.mei_cb.cb_list);
		} else {
			list_add_tail(&write_cb->cb_list,
				      &dev->write_list.mei_cb.cb_list);
		}

	} else {

		write_cb->information = 0;
		cl->writing_state = MEI_WRITING;
		list_add_tail(&write_cb->cb_list,
			      &dev->write_list.mei_cb.cb_list);
	}
	mutex_unlock(&dev->device_lock);
	return length;

unlock_dev:
	mutex_unlock(&dev->device_lock);
	mei_free_cb_private(write_cb);
	return rets;
}


/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data *connect_data = NULL;
	int rets;

	if (cmd != IOCTL_MEI_CONNECT_CLIENT)
		return -EINVAL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->mei_state != MEI_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

	connect_data = kzalloc(sizeof(struct mei_connect_client_data),
							GFP_KERNEL);
	if (!connect_data) {
		rets = -ENOMEM;
		goto out;
	}
	dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
	if (copy_from_user(connect_data, (char __user *)data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}
	rets = mei_ioctl_connect_client(file, connect_data);

	/* if all is OK, copy the data back to user space */
	if (rets)
		goto out;

	dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
	if (copy_to_user((char __user *)data, connect_data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto out;
	}

out:
	kfree(connect_data);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif


/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return mask;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->mei_state != MEI_ENABLED)
		goto out;


	if (cl == &dev->iamthif_cl) {
		mutex_unlock(&dev->device_lock);
		poll_wait(file, &dev->iamthif_cl.wait, wait);
		mutex_lock(&dev->device_lock);
		if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
			dev->iamthif_file_object == file) {
			mask |= (POLLIN | POLLRDNORM);
			dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
			mei_run_next_iamthif_cmd(dev);
		}
		goto out;
	}

	mutex_unlock(&dev->device_lock);
	poll_wait(file, &cl->tx_wait, wait);
	mutex_lock(&dev->device_lock);
	if (MEI_WRITE_COMPLETE == cl->writing_state)
		mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek
};


/*
 * Misc Device Struct
 */
static struct miscdevice  mei_misc_device = {
		.name = MEI_DRIVER_NAME,
		.fops = &mei_fops,
		.minor = MISC_DYNAMIC_MINOR,
};
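
/*
 * The resulting /dev/mei character device is driven from user space
 * roughly as follows (a minimal sketch; the in_client_uuid field name
 * and the client UUID are assumptions based on the mei.h uapi header
 * and are shown only for illustration):
 *
 *	int fd = open("/dev/mei", O_RDWR);
 *	struct mei_connect_client_data data = { 0 };
 *
 *	memcpy(&data.in_client_uuid, &my_client_uuid, sizeof(uuid_le));
 *	if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data) == 0) {
 *		write(fd, request, request_len);
 *		read(fd, response, sizeof(response));
 *	}
 *	close(fd);
 *
 * write() queues a message for the connected ME client and read()
 * returns the client's reply once the read path above has received it.
 */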

/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int __devinit mei_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	mutex_lock(&mei_mutex);
	if (mei_device) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "mei: Failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, mei_driver_name);
	if (err) {
		printk(KERN_ERR "mei: Failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_device_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		printk(KERN_ERR "mei: mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt */
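	/*
	 * With MSI the interrupt is exclusive to this device, so no quick
	 * handler is needed: the default primary handler just wakes the
	 * threaded handler.  The legacy pin interrupt may be shared, hence
	 * the quick handler and IRQF_SHARED in the second case.
	 */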
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			0, mei_driver_name, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, mei_driver_name, dev);

	if (err) {
		printk(KERN_ERR "mei: request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto unmap_memory;
	}
	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
	if (mei_hw_init(dev)) {
		printk(KERN_ERR "mei: Init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = misc_register(&mei_misc_device);
	if (err)
		goto release_irq;

	mei_device = pdev;
	pci_set_drvdata(pdev, dev);


	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("mei: Driver initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
unmap_memory:
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	printk(KERN_ERR "mei: Driver initialization failed.\n");
	return err;
}

/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void __devexit mei_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	if (mei_device != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	mutex_lock(&dev->device_lock);

	mei_wd_stop(dev, false);

	mei_device = NULL;

	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->iamthif_cl);
	}
	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->wd_cl);
	}

	/* Unregistering watchdog device */
	mei_watchdog_unregister(dev);

	/* remove entry if already in list */
	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
	mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
	mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);

	dev->iamthif_current_cb = NULL;
	dev->me_clients_num = 0;

	mutex_unlock(&dev->device_lock);

	flush_scheduled_work();

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
	pci_set_drvdata(pdev, NULL);

	if (dev->mem_addr)
		pci_iounmap(pdev, dev->mem_addr);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	if (!dev)
		return -ENODEV;
	mutex_lock(&dev->device_lock);
	/* Stop watchdog if exists */
	err = mei_wd_stop(dev, true);
	/* Set new mei state */
	if (dev->mei_state == MEI_ENABLED ||
	    dev->mei_state == MEI_RECOVERING_FROM_RESET) {
		dev->mei_state = MEI_POWER_DOWN;
		mei_reset(dev, 0);
	}
	mutex_unlock(&dev->device_lock);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return err;
}

static int mei_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			0, mei_driver_name, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, mei_driver_name, dev);

	if (err) {
		printk(KERN_ERR "mei: Request_irq failure. irq = %d\n",
		       pdev->irq);
		return err;
	}

	mutex_lock(&dev->device_lock);
	dev->mei_state = MEI_POWER_UP;
	mei_reset(dev, 1);
	mutex_unlock(&dev->device_lock);

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return err;
}
static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS	(&mei_pm_ops)
#else
#define MEI_PM_OPS	NULL
#endif /* CONFIG_PM */
/*
 *  PCI driver structure
 */
static struct pci_driver mei_driver = {
	.name = mei_driver_name,
	.id_table = mei_pci_tbl,
	.probe = mei_probe,
	.remove = __devexit_p(mei_remove),
	.shutdown = __devexit_p(mei_remove),
	.driver.pm = MEI_PM_OPS,
};

/**
 * mei_init_module - Driver Registration Routine
 *
 * mei_init_module is the first routine called when the driver is
 * loaded. All it does is to register with the PCI subsystem.
 *
 * returns 0 on success, <0 on failure.
 */
static int __init mei_init_module(void)
{
	int ret;

	pr_debug("mei: %s - version %s\n",
		mei_driver_string, mei_driver_version);
	/* init pci module */
	ret = pci_register_driver(&mei_driver);
	if (ret < 0)
		printk(KERN_ERR "mei: Error registering driver.\n");

	return ret;
}

module_init(mei_init_module);

/**
 * mei_exit_module - Driver Exit Cleanup Routine
 *
 * mei_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit mei_exit_module(void)
{
	misc_deregister(&mei_misc_device);
	pci_unregister_driver(&mei_driver);

	pr_debug("mei: Driver unloaded successfully.\n");
}

module_exit(mei_exit_module);


MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(MEI_DRIVER_VERSION);